2024-12-12 20:36:10,095 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5
2024-12-12 20:36:10,150 main DEBUG Took 0.036082 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-12 20:36:10,151 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-12 20:36:10,152 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-12 20:36:10,156 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-12 20:36:10,158 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 20:36:10,178 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-12 20:36:10,222 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 20:36:10,232 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 20:36:10,235 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 20:36:10,238 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 20:36:10,243 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 20:36:10,244 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 20:36:10,253 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 20:36:10,254 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 20:36:10,261 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 20:36:10,262 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 20:36:10,268 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 20:36:10,269 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 20:36:10,270 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 20:36:10,271 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 20:36:10,280 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 20:36:10,281 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 20:36:10,283 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 20:36:10,284 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 20:36:10,292 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 20:36:10,293 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 20:36:10,294 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 20:36:10,305 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 20:36:10,308 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 20:36:10,310 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 20:36:10,313 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 20:36:10,313 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-12 20:36:10,321 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 20:36:10,324 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-12 20:36:10,352 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-12 20:36:10,353 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
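The LoggerConfig builders above are the per-logger level overrides that the PropertiesConfiguration reads from the log4j2.properties bundled in the hbase-logging test jar: org.apache.hadoop capped at WARN, org.apache.hadoop.hbase and the thirdparty Netty channel package at DEBUG, ZooKeeper and the metrics MBeans at ERROR, and the root logger at INFO routed to the Console appender (the PatternLayout and HBaseTestAppender details continue below). A minimal sketch of how those effective levels can be listed through the Log4j 2 core API, assuming log4j-core is the active backend as it is in this run; the class name is illustrative only:

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.core.LoggerContext;
    import org.apache.logging.log4j.core.config.Configuration;
    import org.apache.logging.log4j.core.config.LoggerConfig;

    public class DumpLoggerLevels {
      public static void main(String[] args) {
        // Grab the active core LoggerContext and the Configuration that the
        // PropertiesConfiguration shown above produced.
        LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
        Configuration cfg = ctx.getConfiguration();
        // Each LoggerConfig corresponds to one of the builder lines in the log;
        // the root logger is registered under the empty name.
        for (LoggerConfig lc : cfg.getLoggers().values()) {
          String name = lc.getName().isEmpty() ? "root" : lc.getName();
          Level level = lc.getLevel();
          System.out.println(name + " -> " + level);
        }
      }
    }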
2024-12-12 20:36:10,368 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-12 20:36:10,369 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-12 20:36:10,401 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-12 20:36:10,420 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-12 20:36:10,423 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-12 20:36:10,423 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-12 20:36:10,424 main DEBUG createAppenders(={Console}) 2024-12-12 20:36:10,425 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 initialized 2024-12-12 20:36:10,425 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 2024-12-12 20:36:10,425 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 OK. 2024-12-12 20:36:10,426 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-12 20:36:10,426 main DEBUG OutputStream closed 2024-12-12 20:36:10,427 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-12 20:36:10,427 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-12 20:36:10,427 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@57cf54e1 OK 2024-12-12 20:36:10,680 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-12 20:36:10,684 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-12 20:36:10,686 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-12 20:36:10,688 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-12 20:36:10,691 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-12 20:36:10,697 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-12 20:36:10,698 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-12 20:36:10,698 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-12 20:36:10,700 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-12 20:36:10,701 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-12 20:36:10,701 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-12 20:36:10,702 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-12 20:36:10,703 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-12 20:36:10,704 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-12 20:36:10,704 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-12 20:36:10,705 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-12 20:36:10,706 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-12 20:36:10,707 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-12 20:36:10,711 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-12 20:36:10,712 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@29ca3d04) with optional ClassLoader: null 2024-12-12 20:36:10,712 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-12 20:36:10,713 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@29ca3d04] started OK. 2024-12-12T20:36:10,730 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins 2024-12-12 20:36:10,734 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-12 20:36:10,734 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-12T20:36:11,365 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30 2024-12-12T20:36:11,365 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestSecureExportSnapshot timeout: 13 mins 2024-12-12T20:36:11,510 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 2024-12-12T20:36:12,093 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-12T20:36:12,094 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47, deleteOnExit=true 2024-12-12T20:36:12,094 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-12T20:36:12,095 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/test.cache.data in system properties and HBase conf 2024-12-12T20:36:12,107 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop.tmp.dir in system properties and HBase conf 2024-12-12T20:36:12,108 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop.log.dir in system properties and HBase conf 2024-12-12T20:36:12,109 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-12T20:36:12,110 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-12T20:36:12,111 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-12T20:36:12,314 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-12T20:36:12,335 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-12T20:36:12,354 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-12T20:36:12,359 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-12T20:36:12,360 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-12T20:36:12,361 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-12T20:36:12,362 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-12T20:36:12,369 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-12T20:36:12,374 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-12T20:36:12,378 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-12T20:36:12,378 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/nfs.dump.dir in system properties and HBase conf 2024-12-12T20:36:12,379 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/java.io.tmpdir in system properties and HBase conf 2024-12-12T20:36:12,381 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-12T20:36:12,382 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-12T20:36:12,383 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-12T20:36:14,506 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-12T20:36:14,682 INFO [Time-limited test {}] log.Log(170): Logging initialized @5745ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-12T20:36:14,836 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T20:36:14,944 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-12T20:36:15,084 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-12T20:36:15,084 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-12T20:36:15,091 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-12T20:36:15,168 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T20:36:15,189 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1caa172f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop.log.dir/,AVAILABLE} 2024-12-12T20:36:15,191 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@34c62ed9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-12T20:36:15,673 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4addcb12{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/java.io.tmpdir/jetty-localhost-33397-hadoop-hdfs-3_4_1-tests_jar-_-any-13607072781368894196/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-12T20:36:15,688 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@47697099{HTTP/1.1, (http/1.1)}{localhost:33397} 2024-12-12T20:36:15,695 INFO [Time-limited test {}] server.Server(415): Started @6753ms 2024-12-12T20:36:16,820 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T20:36:16,831 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-12T20:36:16,849 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-12T20:36:16,849 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-12T20:36:16,849 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-12T20:36:16,863 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77fcc65c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop.log.dir/,AVAILABLE} 2024-12-12T20:36:16,864 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f65c51d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-12T20:36:17,048 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7725692e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/java.io.tmpdir/jetty-localhost-44175-hadoop-hdfs-3_4_1-tests_jar-_-any-1948130277654374324/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-12T20:36:17,050 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@40605cbf{HTTP/1.1, (http/1.1)}{localhost:44175} 2024-12-12T20:36:17,051 INFO [Time-limited test {}] server.Server(415): Started @8116ms 2024-12-12T20:36:17,133 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-12T20:36:17,393 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T20:36:17,411 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-12T20:36:17,433 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-12T20:36:17,433 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-12T20:36:17,433 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-12T20:36:17,435 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c62626f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop.log.dir/,AVAILABLE} 2024-12-12T20:36:17,436 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2536e78d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-12T20:36:17,589 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3b749234{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/java.io.tmpdir/jetty-localhost-43199-hadoop-hdfs-3_4_1-tests_jar-_-any-605158270058539028/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-12T20:36:17,590 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@70160452{HTTP/1.1, (http/1.1)}{localhost:43199} 2024-12-12T20:36:17,590 INFO [Time-limited test {}] server.Server(415): Started @8655ms 2024-12-12T20:36:17,594 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-12T20:36:17,676 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T20:36:17,697 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-12T20:36:17,726 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-12T20:36:17,726 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-12T20:36:17,727 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-12T20:36:17,739 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69723e75{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop.log.dir/,AVAILABLE} 2024-12-12T20:36:17,740 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78feedc7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-12T20:36:17,903 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@48db9f4a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/java.io.tmpdir/jetty-localhost-38249-hadoop-hdfs-3_4_1-tests_jar-_-any-17971447792554197429/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-12T20:36:17,904 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@440e14e3{HTTP/1.1, (http/1.1)}{localhost:38249} 2024-12-12T20:36:17,905 INFO [Time-limited test {}] server.Server(415): Started @8969ms 2024-12-12T20:36:17,908 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
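At this point the HDFS side of the mini-cluster is up: the NameNode web app on localhost:33397 and three DataNode web apps on localhost:44175, 43199 and 38249, which matches the numDataNodes=3 requested in the StartMiniClusterOption logged earlier. A minimal sketch of how a test typically asks HBaseTestingUtility for that topology; the option values mirror the ones logged above, while the surrounding scaffolding is illustrative and not taken from the TestExportSnapshot source:

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        // Mirrors the option logged above: 1 master, 3 region servers,
        // 3 data nodes, 1 ZooKeeper server, no pre-created root/WAL dirs.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(3)
            .numDataNodes(3)
            .numZkServers(1)
            .createRootDir(false)
            .createWALDir(false)
            .build();
        util.startMiniCluster(option);
        try {
          // ... run snapshot export assertions against util.getConnection() ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }

Shutting the utility down tears the DFS, ZooKeeper and HBase processes back down, and the cluster_* data directory created above (deleteOnExit=true) is removed when the JVM exits.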
2024-12-12T20:36:18,891 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data1/current/BP-1420643299-172.17.0.2-1734035773582/current, will proceed with Du for space computation calculation, 2024-12-12T20:36:18,891 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data3/current/BP-1420643299-172.17.0.2-1734035773582/current, will proceed with Du for space computation calculation, 2024-12-12T20:36:18,891 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data2/current/BP-1420643299-172.17.0.2-1734035773582/current, will proceed with Du for space computation calculation, 2024-12-12T20:36:18,891 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data4/current/BP-1420643299-172.17.0.2-1734035773582/current, will proceed with Du for space computation calculation, 2024-12-12T20:36:19,041 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-12T20:36:19,044 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-12T20:36:19,126 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x272f181a488a14f8 with lease ID 0xaa8a3721c84044d: Processing first storage report for DS-cecc7eff-c8bb-4519-8f64-9f1b90360e48 from datanode DatanodeRegistration(127.0.0.1:32781, datanodeUuid=f388b13f-08be-4585-a5ba-acb0b5f227dc, infoPort=44403, infoSecurePort=0, ipcPort=35687, storageInfo=lv=-57;cid=testClusterID;nsid=1533585134;c=1734035773582) 2024-12-12T20:36:19,128 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x272f181a488a14f8 with lease ID 0xaa8a3721c84044d: from storage DS-cecc7eff-c8bb-4519-8f64-9f1b90360e48 node DatanodeRegistration(127.0.0.1:32781, datanodeUuid=f388b13f-08be-4585-a5ba-acb0b5f227dc, infoPort=44403, infoSecurePort=0, ipcPort=35687, storageInfo=lv=-57;cid=testClusterID;nsid=1533585134;c=1734035773582), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-12T20:36:19,131 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x41adf4ddc859f265 with lease ID 0xaa8a3721c84044c: Processing first storage report for DS-79b11db8-af1e-4efe-8dc8-1996e99cfd7a from datanode DatanodeRegistration(127.0.0.1:41061, datanodeUuid=1c167605-940f-454c-99b2-09758889a2c6, infoPort=43301, infoSecurePort=0, ipcPort=44071, storageInfo=lv=-57;cid=testClusterID;nsid=1533585134;c=1734035773582) 2024-12-12T20:36:19,132 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x41adf4ddc859f265 with lease ID 0xaa8a3721c84044c: from storage DS-79b11db8-af1e-4efe-8dc8-1996e99cfd7a node DatanodeRegistration(127.0.0.1:41061, datanodeUuid=1c167605-940f-454c-99b2-09758889a2c6, infoPort=43301, infoSecurePort=0, ipcPort=44071, storageInfo=lv=-57;cid=testClusterID;nsid=1533585134;c=1734035773582), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-12T20:36:19,132 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x272f181a488a14f8 with lease ID 0xaa8a3721c84044d: Processing first storage report for DS-ca3e3f01-4973-43c6-b22d-855e2caa5a67 from datanode DatanodeRegistration(127.0.0.1:32781, datanodeUuid=f388b13f-08be-4585-a5ba-acb0b5f227dc, infoPort=44403, infoSecurePort=0, ipcPort=35687, storageInfo=lv=-57;cid=testClusterID;nsid=1533585134;c=1734035773582) 2024-12-12T20:36:19,132 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x272f181a488a14f8 with lease ID 0xaa8a3721c84044d: from storage DS-ca3e3f01-4973-43c6-b22d-855e2caa5a67 node DatanodeRegistration(127.0.0.1:32781, datanodeUuid=f388b13f-08be-4585-a5ba-acb0b5f227dc, infoPort=44403, infoSecurePort=0, ipcPort=35687, storageInfo=lv=-57;cid=testClusterID;nsid=1533585134;c=1734035773582), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-12T20:36:19,133 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x41adf4ddc859f265 with lease ID 0xaa8a3721c84044c: Processing first storage report for DS-f0bb1133-6847-4beb-878f-b9b53ce0ae67 from datanode DatanodeRegistration(127.0.0.1:41061, datanodeUuid=1c167605-940f-454c-99b2-09758889a2c6, infoPort=43301, infoSecurePort=0, ipcPort=44071, storageInfo=lv=-57;cid=testClusterID;nsid=1533585134;c=1734035773582) 2024-12-12T20:36:19,133 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x41adf4ddc859f265 with lease ID 0xaa8a3721c84044c: from storage DS-f0bb1133-6847-4beb-878f-b9b53ce0ae67 node DatanodeRegistration(127.0.0.1:41061, datanodeUuid=1c167605-940f-454c-99b2-09758889a2c6, infoPort=43301, infoSecurePort=0, ipcPort=44071, storageInfo=lv=-57;cid=testClusterID;nsid=1533585134;c=1734035773582), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-12T20:36:19,189 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data5/current/BP-1420643299-172.17.0.2-1734035773582/current, will proceed with Du for space computation calculation, 2024-12-12T20:36:19,190 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data6/current/BP-1420643299-172.17.0.2-1734035773582/current, will proceed with Du for space computation calculation, 2024-12-12T20:36:19,219 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-12T20:36:19,225 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf22431e5e354d13a with lease ID 0xaa8a3721c84044e: Processing first storage report for DS-75fb073b-e525-4495-a856-9d5b7f6012aa from datanode DatanodeRegistration(127.0.0.1:36349, datanodeUuid=ae32363f-ed8f-43e2-b299-eff2c4d0d5e1, infoPort=46299, infoSecurePort=0, ipcPort=36029, storageInfo=lv=-57;cid=testClusterID;nsid=1533585134;c=1734035773582) 2024-12-12T20:36:19,225 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf22431e5e354d13a with lease ID 0xaa8a3721c84044e: from storage DS-75fb073b-e525-4495-a856-9d5b7f6012aa node DatanodeRegistration(127.0.0.1:36349, datanodeUuid=ae32363f-ed8f-43e2-b299-eff2c4d0d5e1, infoPort=46299, infoSecurePort=0, ipcPort=36029, storageInfo=lv=-57;cid=testClusterID;nsid=1533585134;c=1734035773582), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-12T20:36:19,226 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf22431e5e354d13a with lease ID 0xaa8a3721c84044e: Processing first storage report for DS-43a62af5-985a-4898-81ad-b6464f56cf01 from datanode DatanodeRegistration(127.0.0.1:36349, datanodeUuid=ae32363f-ed8f-43e2-b299-eff2c4d0d5e1, infoPort=46299, infoSecurePort=0, ipcPort=36029, storageInfo=lv=-57;cid=testClusterID;nsid=1533585134;c=1734035773582) 2024-12-12T20:36:19,226 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf22431e5e354d13a with lease ID 0xaa8a3721c84044e: from storage DS-43a62af5-985a-4898-81ad-b6464f56cf01 node DatanodeRegistration(127.0.0.1:36349, datanodeUuid=ae32363f-ed8f-43e2-b299-eff2c4d0d5e1, infoPort=46299, infoSecurePort=0, ipcPort=36029, storageInfo=lv=-57;cid=testClusterID;nsid=1533585134;c=1734035773582), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-12T20:36:19,299 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30 2024-12-12T20:36:19,413 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/zookeeper_0, clientPort=52655, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-12T20:36:19,428 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=52655 2024-12-12T20:36:19,447 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T20:36:19,450 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T20:36:19,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741825_1001 (size=7) 2024-12-12T20:36:19,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741825_1001 (size=7) 2024-12-12T20:36:19,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741825_1001 (size=7) 2024-12-12T20:36:19,958 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 with version=8 2024-12-12T20:36:19,958 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/hbase-staging 2024-12-12T20:36:20,331 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-12T20:36:20,752 INFO [Time-limited test {}] client.ConnectionUtils(129): master/0a737713f6e3:0 server-side Connection retries=45 2024-12-12T20:36:20,789 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T20:36:20,790 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-12T20:36:20,790 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-12T20:36:20,790 INFO [Time-limited test {}] ipc.RpcExecutor(188): 
Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T20:36:20,791 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-12T20:36:21,048 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-12T20:36:21,136 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-12T20:36:21,149 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-12T20:36:21,154 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-12T20:36:21,192 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 50803 (auto-detected) 2024-12-12T20:36:21,194 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-12T20:36:21,217 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:43067 2024-12-12T20:36:21,228 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T20:36:21,231 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T20:36:21,247 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:43067 connecting to ZooKeeper ensemble=127.0.0.1:52655 2024-12-12T20:36:21,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:430670x0, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-12T20:36:21,342 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43067-0x1001bf54bb60000 connected 2024-12-12T20:36:21,552 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-12T20:36:21,556 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T20:36:21,577 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-12T20:36:21,588 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43067 2024-12-12T20:36:21,589 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43067 2024-12-12T20:36:21,595 DEBUG [Time-limited test 
{}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43067 2024-12-12T20:36:21,607 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43067 2024-12-12T20:36:21,611 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43067 2024-12-12T20:36:21,628 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427, hbase.cluster.distributed=false 2024-12-12T20:36:21,745 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/0a737713f6e3:0 server-side Connection retries=45 2024-12-12T20:36:21,745 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T20:36:21,746 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-12T20:36:21,746 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-12T20:36:21,747 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T20:36:21,747 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-12T20:36:21,752 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-12T20:36:21,758 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-12T20:36:21,760 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:42407 2024-12-12T20:36:21,764 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-12T20:36:21,781 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-12T20:36:21,784 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T20:36:21,808 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T20:36:21,824 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:42407 connecting to ZooKeeper ensemble=127.0.0.1:52655 2024-12-12T20:36:21,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:424070x0, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-12T20:36:21,867 
DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-12T20:36:21,868 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42407-0x1001bf54bb60001 connected 2024-12-12T20:36:21,886 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T20:36:21,901 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-12T20:36:21,947 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42407 2024-12-12T20:36:21,948 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42407 2024-12-12T20:36:21,972 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42407 2024-12-12T20:36:21,983 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42407 2024-12-12T20:36:21,984 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42407 2024-12-12T20:36:22,036 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/0a737713f6e3:0 server-side Connection retries=45 2024-12-12T20:36:22,037 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T20:36:22,037 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-12T20:36:22,041 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-12T20:36:22,041 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T20:36:22,041 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-12T20:36:22,041 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-12T20:36:22,042 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-12T20:36:22,052 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:46859 2024-12-12T20:36:22,055 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-12T20:36:22,091 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled 
with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-12T20:36:22,093 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T20:36:22,097 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T20:36:22,118 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:46859 connecting to ZooKeeper ensemble=127.0.0.1:52655 2024-12-12T20:36:22,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:468590x0, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-12T20:36:22,167 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46859-0x1001bf54bb60002 connected 2024-12-12T20:36:22,177 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-12T20:36:22,186 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T20:36:22,188 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-12T20:36:22,195 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46859 2024-12-12T20:36:22,199 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46859 2024-12-12T20:36:22,219 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46859 2024-12-12T20:36:22,228 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46859 2024-12-12T20:36:22,287 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46859 2024-12-12T20:36:22,357 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/0a737713f6e3:0 server-side Connection retries=45 2024-12-12T20:36:22,357 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T20:36:22,358 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-12T20:36:22,358 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-12T20:36:22,358 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T20:36:22,358 INFO [Time-limited 
test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-12T20:36:22,358 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-12T20:36:22,359 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-12T20:36:22,378 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:36817 2024-12-12T20:36:22,380 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-12T20:36:22,403 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-12T20:36:22,406 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T20:36:22,412 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T20:36:22,422 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:36817 connecting to ZooKeeper ensemble=127.0.0.1:52655 2024-12-12T20:36:22,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:368170x0, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-12T20:36:22,438 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:368170x0, quorum=127.0.0.1:52655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-12T20:36:22,451 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:368170x0, quorum=127.0.0.1:52655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T20:36:22,452 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36817-0x1001bf54bb60003 connected 2024-12-12T20:36:22,464 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-12T20:36:22,487 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36817 2024-12-12T20:36:22,493 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36817 2024-12-12T20:36:22,523 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36817 2024-12-12T20:36:22,536 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36817 2024-12-12T20:36:22,539 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36817 2024-12-12T20:36:22,557 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode 
/hbase/backup-masters/0a737713f6e3,43067,1734035780320 2024-12-12T20:36:22,585 DEBUG [M:0;0a737713f6e3:43067 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0a737713f6e3:43067 2024-12-12T20:36:22,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T20:36:22,588 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T20:36:22,588 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T20:36:22,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T20:36:22,598 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0a737713f6e3,43067,1734035780320 2024-12-12T20:36:22,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-12T20:36:22,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:22,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-12T20:36:22,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:22,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-12T20:36:22,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-12T20:36:22,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:22,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase 2024-12-12T20:36:22,653 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-12T20:36:22,656 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-12T20:36:22,660 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0a737713f6e3,43067,1734035780320 from backup master directory 2024-12-12T20:36:22,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T20:36:22,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T20:36:22,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0a737713f6e3,43067,1734035780320 2024-12-12T20:36:22,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T20:36:22,674 WARN [master/0a737713f6e3:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-12T20:36:22,674 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0a737713f6e3,43067,1734035780320 2024-12-12T20:36:22,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T20:36:22,678 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-12T20:36:22,680 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-12T20:36:22,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741826_1002 (size=42) 2024-12-12T20:36:22,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741826_1002 (size=42) 2024-12-12T20:36:22,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741826_1002 (size=42) 2024-12-12T20:36:22,893 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/hbase.id with ID: 95abce6c-1807-4389-b6f9-ab16baa2ef7e 2024-12-12T20:36:23,011 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T20:36:23,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:23,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:23,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:23,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:23,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741827_1003 (size=196) 2024-12-12T20:36:23,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741827_1003 (size=196) 2024-12-12T20:36:23,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741827_1003 (size=196) 2024-12-12T20:36:23,193 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T20:36:23,195 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-12T20:36:23,224 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] 
at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T20:36:23,231 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-12T20:36:23,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741828_1004 (size=1189) 2024-12-12T20:36:23,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741828_1004 (size=1189) 2024-12-12T20:36:23,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741828_1004 (size=1189) 2024-12-12T20:36:23,359 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/MasterData/data/master/store 2024-12-12T20:36:23,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741829_1005 (size=34) 2024-12-12T20:36:23,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741829_1005 (size=34) 2024-12-12T20:36:23,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741829_1005 (size=34) 2024-12-12T20:36:23,469 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] 
throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-12T20:36:23,470 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:36:23,472 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-12T20:36:23,472 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T20:36:23,473 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T20:36:23,473 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-12T20:36:23,473 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T20:36:23,473 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T20:36:23,473 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-12T20:36:23,484 WARN [master/0a737713f6e3:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/MasterData/data/master/store/.initializing 2024-12-12T20:36:23,485 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/MasterData/WALs/0a737713f6e3,43067,1734035780320 2024-12-12T20:36:23,510 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-12T20:36:23,542 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0a737713f6e3%2C43067%2C1734035780320, suffix=, logDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/MasterData/WALs/0a737713f6e3,43067,1734035780320, archiveDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/MasterData/oldWALs, maxLogs=10 2024-12-12T20:36:23,585 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/MasterData/WALs/0a737713f6e3,43067,1734035780320/0a737713f6e3%2C43067%2C1734035780320.1734035783556, exclude list is [], retry=0 2024-12-12T20:36:23,625 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41061,DS-79b11db8-af1e-4efe-8dc8-1996e99cfd7a,DISK] 2024-12-12T20:36:23,626 DEBUG [RS-EventLoopGroup-5-3 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32781,DS-cecc7eff-c8bb-4519-8f64-9f1b90360e48,DISK] 2024-12-12T20:36:23,632 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36349,DS-75fb073b-e525-4495-a856-9d5b7f6012aa,DISK] 2024-12-12T20:36:23,632 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-12T20:36:23,708 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/MasterData/WALs/0a737713f6e3,43067,1734035780320/0a737713f6e3%2C43067%2C1734035780320.1734035783556 2024-12-12T20:36:23,716 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46299:46299),(127.0.0.1/127.0.0.1:44403:44403),(127.0.0.1/127.0.0.1:43301:43301)] 2024-12-12T20:36:23,717 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-12T20:36:23,728 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:36:23,745 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T20:36:23,747 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T20:36:23,838 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T20:36:23,876 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-12T20:36:23,882 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:36:23,886 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T20:36:23,890 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T20:36:23,901 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-12T20:36:23,901 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:36:23,904 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:36:23,906 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T20:36:23,926 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-12T20:36:23,927 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:36:23,930 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:36:23,930 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T20:36:23,936 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-12T20:36:23,941 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:36:23,945 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:36:23,951 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-12T20:36:23,955 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-12T20:36:23,982 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-12T20:36:24,005 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T20:36:24,055 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:36:24,060 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73244269, jitterRate=0.09142465889453888}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-12T20:36:24,069 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-12T20:36:24,070 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-12T20:36:24,163 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f2fb9c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:36:24,287 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-12T20:36:24,307 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-12T20:36:24,308 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-12T20:36:24,319 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-12T20:36:24,324 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 5 msec 2024-12-12T20:36:24,333 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 8 msec 2024-12-12T20:36:24,334 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-12T20:36:24,415 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-12T20:36:24,443 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-12T20:36:24,509 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-12T20:36:24,513 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-12T20:36:24,515 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-12T20:36:24,541 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-12T20:36:24,546 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-12T20:36:24,556 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-12T20:36:24,570 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-12T20:36:24,575 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-12T20:36:24,591 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-12T20:36:24,632 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-12T20:36:24,641 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-12T20:36:24,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-12T20:36:24,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-12T20:36:24,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:24,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-12T20:36:24,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-12T20:36:24,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-12T20:36:24,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:24,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:24,661 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=0a737713f6e3,43067,1734035780320, sessionid=0x1001bf54bb60000, setting cluster-up flag (Was=false) 2024-12-12T20:36:24,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:24,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:24,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:24,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:24,779 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-12T20:36:24,788 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0a737713f6e3,43067,1734035780320 2024-12-12T20:36:24,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:24,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:24,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:24,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:24,883 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-12T20:36:24,911 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0a737713f6e3,43067,1734035780320 2024-12-12T20:36:25,008 DEBUG [RS:2;0a737713f6e3:36817 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;0a737713f6e3:36817 2024-12-12T20:36:25,011 DEBUG [RS:0;0a737713f6e3:42407 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0a737713f6e3:42407 2024-12-12T20:36:25,018 INFO [RS:2;0a737713f6e3:36817 {}] regionserver.HRegionServer(1008): ClusterId : 95abce6c-1807-4389-b6f9-ab16baa2ef7e 2024-12-12T20:36:25,019 DEBUG [RS:1;0a737713f6e3:46859 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;0a737713f6e3:46859 2024-12-12T20:36:25,020 INFO [RS:0;0a737713f6e3:42407 {}] regionserver.HRegionServer(1008): ClusterId : 95abce6c-1807-4389-b6f9-ab16baa2ef7e 2024-12-12T20:36:25,022 DEBUG [RS:0;0a737713f6e3:42407 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-12T20:36:25,027 INFO [RS:1;0a737713f6e3:46859 {}] regionserver.HRegionServer(1008): ClusterId : 95abce6c-1807-4389-b6f9-ab16baa2ef7e 2024-12-12T20:36:25,027 DEBUG [RS:1;0a737713f6e3:46859 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-12T20:36:25,027 DEBUG [RS:2;0a737713f6e3:36817 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-12T20:36:25,059 DEBUG [RS:0;0a737713f6e3:42407 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-12T20:36:25,059 DEBUG [RS:0;0a737713f6e3:42407 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-12T20:36:25,067 DEBUG [RS:2;0a737713f6e3:36817 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-12T20:36:25,067 DEBUG [RS:2;0a737713f6e3:36817 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-12T20:36:25,075 DEBUG [RS:0;0a737713f6e3:42407 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-12T20:36:25,075 DEBUG [RS:1;0a737713f6e3:46859 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-12T20:36:25,075 DEBUG [RS:1;0a737713f6e3:46859 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-12T20:36:25,076 DEBUG [RS:0;0a737713f6e3:42407 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@380513a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:36:25,079 DEBUG [RS:0;0a737713f6e3:42407 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76b6a4a7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, 
maxRetries=0, fallbackAllowed=true, bind address=0a737713f6e3/172.17.0.2:0 2024-12-12T20:36:25,083 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] master.HMaster(3390): Registered master coprocessor service: service=AccessControlService 2024-12-12T20:36:25,085 DEBUG [RS:2;0a737713f6e3:36817 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-12T20:36:25,086 INFO [RS:0;0a737713f6e3:42407 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-12T20:36:25,086 DEBUG [RS:2;0a737713f6e3:36817 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76e4aa05, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:36:25,086 INFO [RS:0;0a737713f6e3:42407 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-12T20:36:25,087 DEBUG [RS:1;0a737713f6e3:46859 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-12T20:36:25,087 DEBUG [RS:2;0a737713f6e3:36817 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63061fb8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0a737713f6e3/172.17.0.2:0 2024-12-12T20:36:25,087 INFO [RS:2;0a737713f6e3:36817 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-12T20:36:25,087 INFO [RS:2;0a737713f6e3:36817 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-12T20:36:25,087 DEBUG [RS:1;0a737713f6e3:46859 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5351fdc3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:36:25,092 DEBUG [RS:1;0a737713f6e3:46859 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15efcdbc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0a737713f6e3/172.17.0.2:0 2024-12-12T20:36:25,093 INFO [RS:1;0a737713f6e3:46859 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-12T20:36:25,093 INFO [RS:1;0a737713f6e3:46859 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-12T20:36:25,115 DEBUG [RS:1;0a737713f6e3:46859 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-12T20:36:25,117 INFO [RS:1;0a737713f6e3:46859 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T20:36:25,117 DEBUG [RS:1;0a737713f6e3:46859 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-12T20:36:25,118 DEBUG [RS:2;0a737713f6e3:36817 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-12T20:36:25,118 DEBUG [RS:0;0a737713f6e3:42407 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-12T20:36:25,118 INFO [RS:2;0a737713f6e3:36817 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T20:36:25,118 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T20:36:25,118 DEBUG [RS:2;0a737713f6e3:36817 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-12T20:36:25,119 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 2024-12-12T20:36:25,119 INFO [RS:0;0a737713f6e3:42407 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T20:36:25,119 DEBUG [RS:0;0a737713f6e3:42407 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-12T20:36:25,121 INFO [RS:1;0a737713f6e3:46859 {}] regionserver.HRegionServer(3073): reportForDuty to master=0a737713f6e3,43067,1734035780320 with isa=0a737713f6e3/172.17.0.2:46859, startcode=1734035782031 2024-12-12T20:36:25,124 INFO [RS:2;0a737713f6e3:36817 {}] regionserver.HRegionServer(3073): reportForDuty to master=0a737713f6e3,43067,1734035780320 with isa=0a737713f6e3/172.17.0.2:36817, startcode=1734035782356 2024-12-12T20:36:25,124 INFO [RS:0;0a737713f6e3:42407 {}] regionserver.HRegionServer(3073): reportForDuty to master=0a737713f6e3,43067,1734035780320 with isa=0a737713f6e3/172.17.0.2:42407, startcode=1734035781742 2024-12-12T20:36:25,150 DEBUG [RS:1;0a737713f6e3:46859 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-12T20:36:25,151 DEBUG [RS:2;0a737713f6e3:36817 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-12T20:36:25,159 DEBUG [RS:0;0a737713f6e3:42407 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-12T20:36:25,290 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58947, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-12T20:36:25,293 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60013, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-12T20:36:25,294 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51827, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-12T20:36:25,303 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43067 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) 
~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T20:36:25,317 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-12T20:36:25,320 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43067 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T20:36:25,322 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43067 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T20:36:25,341 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-12T20:36:25,359 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-12T20:36:25,359 DEBUG [RS:1;0a737713f6e3:46859 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-12T20:36:25,363 WARN [RS:1;0a737713f6e3:46859 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-12T20:36:25,365 DEBUG [RS:2;0a737713f6e3:36817 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-12T20:36:25,366 WARN [RS:2;0a737713f6e3:36817 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-12T20:36:25,367 DEBUG [RS:0;0a737713f6e3:42407 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-12T20:36:25,367 WARN [RS:0;0a737713f6e3:42407 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 
2024-12-12T20:36:25,371 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0a737713f6e3,43067,1734035780320 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-12T20:36:25,377 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0a737713f6e3:0, corePoolSize=5, maxPoolSize=5 2024-12-12T20:36:25,378 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0a737713f6e3:0, corePoolSize=5, maxPoolSize=5 2024-12-12T20:36:25,378 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0a737713f6e3:0, corePoolSize=5, maxPoolSize=5 2024-12-12T20:36:25,378 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0a737713f6e3:0, corePoolSize=5, maxPoolSize=5 2024-12-12T20:36:25,379 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0a737713f6e3:0, corePoolSize=10, maxPoolSize=10 2024-12-12T20:36:25,379 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,379 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0a737713f6e3:0, corePoolSize=2, maxPoolSize=2 2024-12-12T20:36:25,379 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,425 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-12T20:36:25,426 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-12T20:36:25,437 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1734035815437 2024-12-12T20:36:25,441 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-12T20:36:25,443 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-12T20:36:25,450 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:36:25,451 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, 
{NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-12T20:36:25,459 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-12T20:36:25,462 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-12T20:36:25,462 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-12T20:36:25,463 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-12T20:36:25,468 INFO [RS:2;0a737713f6e3:36817 {}] regionserver.HRegionServer(3073): reportForDuty to master=0a737713f6e3,43067,1734035780320 with isa=0a737713f6e3/172.17.0.2:36817, startcode=1734035782356 2024-12-12T20:36:25,471 INFO [RS:0;0a737713f6e3:42407 {}] regionserver.HRegionServer(3073): reportForDuty to master=0a737713f6e3,43067,1734035780320 with isa=0a737713f6e3/172.17.0.2:42407, startcode=1734035781742 2024-12-12T20:36:25,474 INFO [RS:1;0a737713f6e3:46859 {}] regionserver.HRegionServer(3073): reportForDuty to master=0a737713f6e3,43067,1734035780320 with isa=0a737713f6e3/172.17.0.2:46859, startcode=1734035782031 2024-12-12T20:36:25,473 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43067 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T20:36:25,477 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43067 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T20:36:25,480 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-12T20:36:25,479 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43067 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T20:36:25,480 DEBUG [RS:1;0a737713f6e3:46859 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-12T20:36:25,480 WARN [RS:1;0a737713f6e3:46859 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 200 ms and then retrying. 2024-12-12T20:36:25,481 DEBUG [RS:0;0a737713f6e3:42407 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-12T20:36:25,481 WARN [RS:0;0a737713f6e3:42407 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 200 ms and then retrying. 
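The repeated ServerNotRunningYetException traces and the "reportForDuty failed; sleeping 100/200 ms and then retrying" warnings above are the expected handshake while the master is still initializing: each region server backs off and retries until registration succeeds. A schematic sketch of that retry-with-backoff shape in plain Java; this is an illustration only, not the HRegionServer code, and reportForDuty() here is a stand-in:

```java
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;

public class ReportForDutyRetrySketch {

    /** Stand-in for the real RPC; fails randomly to mimic "Master is not running yet". */
    static boolean reportForDuty() {
        return ThreadLocalRandom.current().nextInt(4) == 0;
    }

    public static void main(String[] args) throws InterruptedException {
        long sleepMs = 100;            // first retry after 100 ms, as in the log
        final long maxSleepMs = 3_000; // cap the backoff somewhere sensible

        while (!reportForDuty()) {
            System.out.println("reportForDuty failed; sleeping " + sleepMs + " ms and then retrying.");
            TimeUnit.MILLISECONDS.sleep(sleepMs);
            sleepMs = Math.min(sleepMs * 2, maxSleepMs); // 100 -> 200 -> 400 ...
        }
        System.out.println("registered with master");
    }
}
```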
2024-12-12T20:36:25,483 DEBUG [RS:2;0a737713f6e3:36817 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-12T20:36:25,483 WARN [RS:2;0a737713f6e3:36817 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 200 ms and then retrying. 2024-12-12T20:36:25,500 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-12T20:36:25,502 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-12T20:36:25,502 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-12T20:36:25,523 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-12T20:36:25,524 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-12T20:36:25,536 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0a737713f6e3:0:becomeActiveMaster-HFileCleaner.large.0-1734035785526,5,FailOnTimeoutGroup] 2024-12-12T20:36:25,538 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0a737713f6e3:0:becomeActiveMaster-HFileCleaner.small.0-1734035785536,5,FailOnTimeoutGroup] 2024-12-12T20:36:25,538 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-12T20:36:25,538 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-12T20:36:25,540 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-12T20:36:25,541 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-12T20:36:25,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741831_1007 (size=1039) 2024-12-12T20:36:25,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741831_1007 (size=1039) 2024-12-12T20:36:25,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741831_1007 (size=1039) 2024-12-12T20:36:25,619 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-12T20:36:25,619 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:36:25,684 INFO [RS:0;0a737713f6e3:42407 {}] regionserver.HRegionServer(3073): reportForDuty to master=0a737713f6e3,43067,1734035780320 with isa=0a737713f6e3/172.17.0.2:42407, startcode=1734035781742 2024-12-12T20:36:25,686 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43067 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 0a737713f6e3,42407,1734035781742 2024-12-12T20:36:25,687 INFO [RS:1;0a737713f6e3:46859 {}] regionserver.HRegionServer(3073): reportForDuty to master=0a737713f6e3,43067,1734035780320 with isa=0a737713f6e3/172.17.0.2:46859, startcode=1734035782031 2024-12-12T20:36:25,693 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43067 {}] master.ServerManager(486): Registering regionserver=0a737713f6e3,42407,1734035781742 2024-12-12T20:36:25,695 INFO [RS:2;0a737713f6e3:36817 {}] regionserver.HRegionServer(3073): reportForDuty to master=0a737713f6e3,43067,1734035780320 with isa=0a737713f6e3/172.17.0.2:36817, startcode=1734035782356 2024-12-12T20:36:25,727 DEBUG [RS:0;0a737713f6e3:42407 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:36:25,727 DEBUG [RS:0;0a737713f6e3:42407 {}] 
regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:37615 2024-12-12T20:36:25,727 DEBUG [RS:0;0a737713f6e3:42407 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-12T20:36:25,732 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43067 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 0a737713f6e3,46859,1734035782031 2024-12-12T20:36:25,732 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43067 {}] master.ServerManager(486): Registering regionserver=0a737713f6e3,46859,1734035782031 2024-12-12T20:36:25,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741832_1008 (size=32) 2024-12-12T20:36:25,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741832_1008 (size=32) 2024-12-12T20:36:25,742 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:36:25,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-12T20:36:25,746 DEBUG [RS:0;0a737713f6e3:42407 {}] zookeeper.ZKUtil(111): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0a737713f6e3,42407,1734035781742 2024-12-12T20:36:25,746 WARN [RS:0;0a737713f6e3:42407 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
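The hbase:meta descriptor dumped above (FSTableDescriptors(133) and again in the HRegion(7106) "creating" entry) lists per-family attributes such as VERSIONS, IN_MEMORY, BLOOMFILTER, DATA_BLOCK_ENCODING and BLOCKSIZE. For an ordinary user table the same attributes are expressed through the public HBase 2.x client builders; a minimal sketch mirroring the 'info' family's settings (the table name "example" is hypothetical):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
    public static void main(String[] args) {
        // Column family mirroring the 'info' attributes in the log:
        // 3 versions, in-memory, ROWCOL bloom filter, ROW_INDEX_V1 encoding, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .build();

        TableDescriptor table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example"))   // hypothetical table name
            .setColumnFamily(info)
            .build();

        System.out.println(table);
    }
}
```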
2024-12-12T20:36:25,746 INFO [RS:0;0a737713f6e3:42407 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-12T20:36:25,747 DEBUG [RS:0;0a737713f6e3:42407 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/WALs/0a737713f6e3,42407,1734035781742 2024-12-12T20:36:25,750 DEBUG [RS:1;0a737713f6e3:46859 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:36:25,750 DEBUG [RS:1;0a737713f6e3:46859 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:37615 2024-12-12T20:36:25,750 DEBUG [RS:1;0a737713f6e3:46859 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-12T20:36:25,751 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43067 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 0a737713f6e3,36817,1734035782356 2024-12-12T20:36:25,751 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43067 {}] master.ServerManager(486): Registering regionserver=0a737713f6e3,36817,1734035782356 2024-12-12T20:36:25,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741832_1008 (size=32) 2024-12-12T20:36:25,760 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0a737713f6e3,42407,1734035781742] 2024-12-12T20:36:25,762 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-12T20:36:25,775 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-12T20:36:25,779 DEBUG [RS:2;0a737713f6e3:36817 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:36:25,776 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:36:25,779 DEBUG [RS:2;0a737713f6e3:36817 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:37615 2024-12-12T20:36:25,779 DEBUG [RS:2;0a737713f6e3:36817 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-12T20:36:25,786 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T20:36:25,787 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-12T20:36:25,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-12T20:36:25,795 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-12T20:36:25,796 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:36:25,799 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T20:36:25,799 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-12T20:36:25,808 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0a737713f6e3,46859,1734035782031] 2024-12-12T20:36:25,810 DEBUG [RS:2;0a737713f6e3:36817 {}] zookeeper.ZKUtil(111): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0a737713f6e3,36817,1734035782356 2024-12-12T20:36:25,808 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0a737713f6e3,36817,1734035782356] 2024-12-12T20:36:25,811 DEBUG [RS:1;0a737713f6e3:46859 {}] zookeeper.ZKUtil(111): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0a737713f6e3,46859,1734035782031 2024-12-12T20:36:25,810 WARN [RS:2;0a737713f6e3:36817 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-12T20:36:25,811 WARN [RS:1;0a737713f6e3:46859 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
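The wal.WALFactory(183) entry above shows the region server instantiating AsyncFSWALProvider. Which provider is used is selected by configuration; a minimal sketch, assuming the documented hbase.wal.provider key (value "asyncfs" selects the asynchronous fan-out WAL seen here, "filesystem" the classic FSHLog), with the maxlogs key name likewise taken from the reference guide:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Select the asynchronous fan-out WAL implementation seen in the log.
        conf.set("hbase.wal.provider", "asyncfs");

        // Retention tuning; 32 matches the maxLogs value printed later by
        // AbstractFSWAL's "WAL configuration" entry.
        conf.setInt("hbase.regionserver.maxlogs", 32);

        System.out.println("wal provider = " + conf.get("hbase.wal.provider"));
    }
}
```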
2024-12-12T20:36:25,811 INFO [RS:1;0a737713f6e3:46859 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-12T20:36:25,811 INFO [RS:2;0a737713f6e3:36817 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-12T20:36:25,813 DEBUG [RS:2;0a737713f6e3:36817 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/WALs/0a737713f6e3,36817,1734035782356 2024-12-12T20:36:25,813 DEBUG [RS:1;0a737713f6e3:46859 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/WALs/0a737713f6e3,46859,1734035782031 2024-12-12T20:36:25,817 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-12T20:36:25,818 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:36:25,821 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T20:36:25,831 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/meta/1588230740 2024-12-12T20:36:25,834 DEBUG [RS:0;0a737713f6e3:42407 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-12T20:36:25,838 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/meta/1588230740 2024-12-12T20:36:25,860 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
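The CompactionConfiguration(181) entries above print the effective compaction tuning for each of the three hbase:meta families (minFilesToCompact=3, maxFilesToCompact=10, ratio 1.2, off-peak ratio 5.0). These map onto the hbase.hstore.compaction.* keys; a small sketch with the key names assumed from the HBase book, so check them against your version:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Minimum / maximum number of store files considered per minor compaction.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);

        // Selection ratios printed as "ratio 1.200000; off-peak ratio 5.000000".
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);

        System.out.println("compaction min files = "
            + conf.getInt("hbase.hstore.compaction.min", -1));
    }
}
```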
2024-12-12T20:36:25,868 DEBUG [RS:1;0a737713f6e3:46859 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-12T20:36:25,880 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-12T20:36:25,887 INFO [RS:0;0a737713f6e3:42407 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-12T20:36:25,887 DEBUG [RS:2;0a737713f6e3:36817 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-12T20:36:25,895 INFO [RS:1;0a737713f6e3:46859 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-12T20:36:25,915 INFO [RS:2;0a737713f6e3:36817 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-12T20:36:25,918 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:36:25,922 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59333452, jitterRate=-0.11586266756057739}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-12T20:36:25,929 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-12T20:36:25,929 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-12T20:36:25,929 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-12T20:36:25,930 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-12T20:36:25,930 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-12T20:36:25,930 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-12T20:36:25,937 INFO [RS:0;0a737713f6e3:42407 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-12T20:36:25,947 INFO [RS:2;0a737713f6e3:36817 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-12T20:36:25,951 INFO [RS:2;0a737713f6e3:36817 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-12T20:36:25,951 INFO [RS:2;0a737713f6e3:36817 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
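The MemStoreFlusher and split-policy entries above (globalMemStoreLimit=880 M, SteppingSplitPolicy wrapping IncreasingToUpperBoundRegionSplitPolicy, FlushLargeStoresPolicy flushSizeLowerBound) are likewise configuration-driven. A sketch of the commonly used keys, again assumed from the reference guide rather than taken from this test's site file:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreAndSplitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Fraction of the region server heap usable by all memstores combined;
        // the 880 M / 836 M limits in the log derive from this plus the heap size.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);

        // Per-region memstore flush threshold (default 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);

        // Region split policy class, e.g. the SteppingSplitPolicy seen in the log.
        conf.set("hbase.regionserver.region.split.policy",
            "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy");

        System.out.println(conf.get("hbase.regionserver.region.split.policy"));
    }
}
```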
2024-12-12T20:36:25,952 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-12T20:36:25,953 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-12T20:36:25,954 INFO [RS:1;0a737713f6e3:46859 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-12T20:36:25,958 INFO [RS:2;0a737713f6e3:36817 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-12T20:36:25,974 INFO [RS:1;0a737713f6e3:46859 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-12T20:36:25,974 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-12T20:36:25,974 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-12T20:36:25,974 INFO [RS:1;0a737713f6e3:46859 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T20:36:25,981 INFO [RS:1;0a737713f6e3:46859 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-12T20:36:25,985 INFO [RS:0;0a737713f6e3:42407 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-12T20:36:25,985 INFO [RS:0;0a737713f6e3:42407 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T20:36:25,986 INFO [RS:1;0a737713f6e3:46859 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-12T20:36:25,986 DEBUG [RS:1;0a737713f6e3:46859 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,987 DEBUG [RS:1;0a737713f6e3:46859 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,987 DEBUG [RS:1;0a737713f6e3:46859 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,987 DEBUG [RS:1;0a737713f6e3:46859 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,987 DEBUG [RS:1;0a737713f6e3:46859 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,987 DEBUG [RS:1;0a737713f6e3:46859 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0a737713f6e3:0, corePoolSize=2, maxPoolSize=2 2024-12-12T20:36:25,988 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-12T20:36:25,988 INFO [RS:2;0a737713f6e3:36817 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-12T20:36:25,988 DEBUG [RS:2;0a737713f6e3:36817 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,988 DEBUG [RS:2;0a737713f6e3:36817 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,988 DEBUG [RS:2;0a737713f6e3:36817 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,989 DEBUG [RS:2;0a737713f6e3:36817 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,989 DEBUG [RS:2;0a737713f6e3:36817 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,989 DEBUG [RS:2;0a737713f6e3:36817 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0a737713f6e3:0, corePoolSize=2, maxPoolSize=2 2024-12-12T20:36:25,989 DEBUG [RS:2;0a737713f6e3:36817 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,989 DEBUG [RS:2;0a737713f6e3:36817 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,989 DEBUG [RS:2;0a737713f6e3:36817 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,989 DEBUG [RS:2;0a737713f6e3:36817 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SWITCH_RPC_THROTTLE-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,990 DEBUG [RS:2;0a737713f6e3:36817 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,990 DEBUG [RS:2;0a737713f6e3:36817 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0, corePoolSize=3, maxPoolSize=3 2024-12-12T20:36:25,990 DEBUG [RS:2;0a737713f6e3:36817 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0a737713f6e3:0, corePoolSize=3, maxPoolSize=3 2024-12-12T20:36:25,992 DEBUG [RS:1;0a737713f6e3:46859 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,992 DEBUG [RS:1;0a737713f6e3:46859 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,992 DEBUG [RS:1;0a737713f6e3:46859 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,992 DEBUG [RS:1;0a737713f6e3:46859 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,992 DEBUG [RS:1;0a737713f6e3:46859 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,993 DEBUG [RS:1;0a737713f6e3:46859 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0, corePoolSize=3, maxPoolSize=3 2024-12-12T20:36:25,993 DEBUG [RS:1;0a737713f6e3:46859 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0a737713f6e3:0, corePoolSize=3, maxPoolSize=3 2024-12-12T20:36:25,995 INFO [RS:0;0a737713f6e3:42407 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-12T20:36:25,998 INFO [RS:0;0a737713f6e3:42407 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-12T20:36:25,999 DEBUG [RS:0;0a737713f6e3:42407 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,999 DEBUG [RS:0;0a737713f6e3:42407 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,999 DEBUG [RS:0;0a737713f6e3:42407 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,999 DEBUG [RS:0;0a737713f6e3:42407 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,999 DEBUG [RS:0;0a737713f6e3:42407 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:25,999 DEBUG [RS:0;0a737713f6e3:42407 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0a737713f6e3:0, corePoolSize=2, maxPoolSize=2 2024-12-12T20:36:25,999 DEBUG [RS:0;0a737713f6e3:42407 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:26,000 DEBUG [RS:0;0a737713f6e3:42407 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:26,000 DEBUG [RS:0;0a737713f6e3:42407 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:26,000 DEBUG [RS:0;0a737713f6e3:42407 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:26,000 DEBUG [RS:0;0a737713f6e3:42407 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0a737713f6e3:0, corePoolSize=1, maxPoolSize=1 2024-12-12T20:36:26,000 DEBUG [RS:0;0a737713f6e3:42407 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0, corePoolSize=3, maxPoolSize=3 2024-12-12T20:36:26,000 DEBUG [RS:0;0a737713f6e3:42407 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0a737713f6e3:0, corePoolSize=3, maxPoolSize=3 2024-12-12T20:36:26,006 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-12T20:36:26,015 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-12T20:36:26,017 INFO [RS:1;0a737713f6e3:46859 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T20:36:26,017 INFO [RS:1;0a737713f6e3:46859 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
2024-12-12T20:36:26,021 INFO [RS:1;0a737713f6e3:46859 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-12T20:36:26,021 INFO [RS:1;0a737713f6e3:46859 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-12T20:36:26,021 INFO [RS:1;0a737713f6e3:46859 {}] hbase.ChoreService(168): Chore ScheduledChore name=0a737713f6e3,46859,1734035782031-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-12T20:36:26,030 INFO [RS:0;0a737713f6e3:42407 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T20:36:26,030 INFO [RS:0;0a737713f6e3:42407 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T20:36:26,030 INFO [RS:0;0a737713f6e3:42407 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-12T20:36:26,030 INFO [RS:0;0a737713f6e3:42407 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-12T20:36:26,030 INFO [RS:0;0a737713f6e3:42407 {}] hbase.ChoreService(168): Chore ScheduledChore name=0a737713f6e3,42407,1734035781742-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-12T20:36:26,039 INFO [RS:2;0a737713f6e3:36817 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T20:36:26,040 INFO [RS:2;0a737713f6e3:36817 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T20:36:26,040 INFO [RS:2;0a737713f6e3:36817 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-12T20:36:26,040 INFO [RS:2;0a737713f6e3:36817 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-12T20:36:26,040 INFO [RS:2;0a737713f6e3:36817 {}] hbase.ChoreService(168): Chore ScheduledChore name=0a737713f6e3,36817,1734035782356-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-12T20:36:26,105 INFO [RS:2;0a737713f6e3:36817 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-12T20:36:26,109 INFO [RS:0;0a737713f6e3:42407 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-12T20:36:26,113 INFO [RS:2;0a737713f6e3:36817 {}] hbase.ChoreService(168): Chore ScheduledChore name=0a737713f6e3,36817,1734035782356-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T20:36:26,114 INFO [RS:1;0a737713f6e3:46859 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-12T20:36:26,115 INFO [RS:1;0a737713f6e3:46859 {}] hbase.ChoreService(168): Chore ScheduledChore name=0a737713f6e3,46859,1734035782031-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T20:36:26,115 INFO [RS:0;0a737713f6e3:42407 {}] hbase.ChoreService(168): Chore ScheduledChore name=0a737713f6e3,42407,1734035781742-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
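The ChoreService entries above register periodic background tasks on each region server (CompactionChecker and MemstoreFlusherChore every 1000 ms, nonceCleaner every 360000 ms, and so on). Functionally this has the same shape as a JDK scheduled pool; the following is a plain-Java analogy, not the HBase ChoreService API:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreAnalogy {
    public static void main(String[] args) throws InterruptedException {
        // One worker thread running periodic "chores", similar in spirit to ChoreService.
        ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor();

        // A stand-in for something like CompactionChecker: runs every 1000 ms.
        chores.scheduleAtFixedRate(
            () -> System.out.println("compaction check tick"),
            0, 1000, TimeUnit.MILLISECONDS);

        TimeUnit.SECONDS.sleep(3);   // let a few ticks fire, then shut down
        chores.shutdownNow();
    }
}
```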
2024-12-12T20:36:26,166 INFO [RS:0;0a737713f6e3:42407 {}] regionserver.Replication(204): 0a737713f6e3,42407,1734035781742 started 2024-12-12T20:36:26,166 WARN [0a737713f6e3:43067 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 2024-12-12T20:36:26,166 INFO [RS:0;0a737713f6e3:42407 {}] regionserver.HRegionServer(1767): Serving as 0a737713f6e3,42407,1734035781742, RpcServer on 0a737713f6e3/172.17.0.2:42407, sessionid=0x1001bf54bb60001 2024-12-12T20:36:26,167 DEBUG [RS:0;0a737713f6e3:42407 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-12T20:36:26,168 DEBUG [RS:0;0a737713f6e3:42407 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0a737713f6e3,42407,1734035781742 2024-12-12T20:36:26,168 DEBUG [RS:0;0a737713f6e3:42407 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0a737713f6e3,42407,1734035781742' 2024-12-12T20:36:26,168 DEBUG [RS:0;0a737713f6e3:42407 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-12T20:36:26,169 DEBUG [RS:0;0a737713f6e3:42407 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-12T20:36:26,173 DEBUG [RS:0;0a737713f6e3:42407 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-12T20:36:26,173 DEBUG [RS:0;0a737713f6e3:42407 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-12T20:36:26,173 DEBUG [RS:0;0a737713f6e3:42407 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0a737713f6e3,42407,1734035781742 2024-12-12T20:36:26,177 DEBUG [RS:0;0a737713f6e3:42407 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0a737713f6e3,42407,1734035781742' 2024-12-12T20:36:26,177 DEBUG [RS:0;0a737713f6e3:42407 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-12T20:36:26,183 INFO [RS:2;0a737713f6e3:36817 {}] regionserver.Replication(204): 0a737713f6e3,36817,1734035782356 started 2024-12-12T20:36:26,184 INFO [RS:2;0a737713f6e3:36817 {}] regionserver.HRegionServer(1767): Serving as 0a737713f6e3,36817,1734035782356, RpcServer on 0a737713f6e3/172.17.0.2:36817, sessionid=0x1001bf54bb60003 2024-12-12T20:36:26,184 DEBUG [RS:2;0a737713f6e3:36817 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-12T20:36:26,184 DEBUG [RS:2;0a737713f6e3:36817 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0a737713f6e3,36817,1734035782356 2024-12-12T20:36:26,184 DEBUG [RS:2;0a737713f6e3:36817 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0a737713f6e3,36817,1734035782356' 2024-12-12T20:36:26,185 DEBUG [RS:2;0a737713f6e3:36817 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-12T20:36:26,185 DEBUG [RS:0;0a737713f6e3:42407 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-12T20:36:26,188 INFO [RS:1;0a737713f6e3:46859 {}] regionserver.Replication(204): 0a737713f6e3,46859,1734035782031 started 2024-12-12T20:36:26,188 INFO [RS:1;0a737713f6e3:46859 {}] regionserver.HRegionServer(1767): Serving as 0a737713f6e3,46859,1734035782031, RpcServer on 
0a737713f6e3/172.17.0.2:46859, sessionid=0x1001bf54bb60002 2024-12-12T20:36:26,188 DEBUG [RS:1;0a737713f6e3:46859 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-12T20:36:26,189 DEBUG [RS:1;0a737713f6e3:46859 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0a737713f6e3,46859,1734035782031 2024-12-12T20:36:26,189 DEBUG [RS:1;0a737713f6e3:46859 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0a737713f6e3,46859,1734035782031' 2024-12-12T20:36:26,189 DEBUG [RS:1;0a737713f6e3:46859 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-12T20:36:26,192 DEBUG [RS:2;0a737713f6e3:36817 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-12T20:36:26,195 DEBUG [RS:0;0a737713f6e3:42407 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-12T20:36:26,195 INFO [RS:0;0a737713f6e3:42407 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-12T20:36:26,195 DEBUG [RS:1;0a737713f6e3:46859 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-12T20:36:26,195 INFO [RS:0;0a737713f6e3:42407 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-12T20:36:26,195 DEBUG [RS:2;0a737713f6e3:36817 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-12T20:36:26,202 DEBUG [RS:2;0a737713f6e3:36817 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-12T20:36:26,202 DEBUG [RS:2;0a737713f6e3:36817 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0a737713f6e3,36817,1734035782356 2024-12-12T20:36:26,202 DEBUG [RS:2;0a737713f6e3:36817 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0a737713f6e3,36817,1734035782356' 2024-12-12T20:36:26,203 DEBUG [RS:2;0a737713f6e3:36817 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-12T20:36:26,207 DEBUG [RS:1;0a737713f6e3:46859 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-12T20:36:26,207 DEBUG [RS:1;0a737713f6e3:46859 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-12T20:36:26,207 DEBUG [RS:1;0a737713f6e3:46859 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0a737713f6e3,46859,1734035782031 2024-12-12T20:36:26,207 DEBUG [RS:1;0a737713f6e3:46859 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0a737713f6e3,46859,1734035782031' 2024-12-12T20:36:26,207 DEBUG [RS:1;0a737713f6e3:46859 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-12T20:36:26,211 DEBUG [RS:1;0a737713f6e3:46859 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-12T20:36:26,216 DEBUG [RS:2;0a737713f6e3:36817 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-12T20:36:26,218 DEBUG [RS:2;0a737713f6e3:36817 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-12T20:36:26,218 INFO [RS:2;0a737713f6e3:36817 
{}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-12T20:36:26,218 INFO [RS:2;0a737713f6e3:36817 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-12T20:36:26,223 DEBUG [RS:1;0a737713f6e3:46859 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-12T20:36:26,224 INFO [RS:1;0a737713f6e3:46859 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-12T20:36:26,224 INFO [RS:1;0a737713f6e3:46859 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-12T20:36:26,312 INFO [RS:0;0a737713f6e3:42407 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-12T20:36:26,321 INFO [RS:2;0a737713f6e3:36817 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-12T20:36:26,325 INFO [RS:1;0a737713f6e3:46859 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-12T20:36:26,330 INFO [RS:0;0a737713f6e3:42407 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0a737713f6e3%2C42407%2C1734035781742, suffix=, logDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/WALs/0a737713f6e3,42407,1734035781742, archiveDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/oldWALs, maxLogs=32 2024-12-12T20:36:26,340 INFO [RS:2;0a737713f6e3:36817 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0a737713f6e3%2C36817%2C1734035782356, suffix=, logDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/WALs/0a737713f6e3,36817,1734035782356, archiveDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/oldWALs, maxLogs=32 2024-12-12T20:36:26,344 INFO [RS:1;0a737713f6e3:46859 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0a737713f6e3%2C46859%2C1734035782031, suffix=, logDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/WALs/0a737713f6e3,46859,1734035782031, archiveDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/oldWALs, maxLogs=32 2024-12-12T20:36:26,389 DEBUG [RS:2;0a737713f6e3:36817 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/WALs/0a737713f6e3,36817,1734035782356/0a737713f6e3%2C36817%2C1734035782356.1734035786342, exclude list is [], retry=0 2024-12-12T20:36:26,401 DEBUG [RS:0;0a737713f6e3:42407 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/WALs/0a737713f6e3,42407,1734035781742/0a737713f6e3%2C42407%2C1734035781742.1734035786343, exclude list is [], retry=0 2024-12-12T20:36:26,428 DEBUG [RS:1;0a737713f6e3:46859 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/WALs/0a737713f6e3,46859,1734035782031/0a737713f6e3%2C46859%2C1734035782031.1734035786353, exclude list is [], retry=0 2024-12-12T20:36:26,429 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:32781,DS-cecc7eff-c8bb-4519-8f64-9f1b90360e48,DISK] 2024-12-12T20:36:26,430 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36349,DS-75fb073b-e525-4495-a856-9d5b7f6012aa,DISK] 2024-12-12T20:36:26,432 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41061,DS-79b11db8-af1e-4efe-8dc8-1996e99cfd7a,DISK] 2024-12-12T20:36:26,441 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41061,DS-79b11db8-af1e-4efe-8dc8-1996e99cfd7a,DISK] 2024-12-12T20:36:26,442 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32781,DS-cecc7eff-c8bb-4519-8f64-9f1b90360e48,DISK] 2024-12-12T20:36:26,442 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36349,DS-75fb073b-e525-4495-a856-9d5b7f6012aa,DISK] 2024-12-12T20:36:26,455 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36349,DS-75fb073b-e525-4495-a856-9d5b7f6012aa,DISK] 2024-12-12T20:36:26,457 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41061,DS-79b11db8-af1e-4efe-8dc8-1996e99cfd7a,DISK] 2024-12-12T20:36:26,507 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32781,DS-cecc7eff-c8bb-4519-8f64-9f1b90360e48,DISK] 2024-12-12T20:36:26,544 INFO [RS:2;0a737713f6e3:36817 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/WALs/0a737713f6e3,36817,1734035782356/0a737713f6e3%2C36817%2C1734035782356.1734035786342 2024-12-12T20:36:26,564 INFO [RS:0;0a737713f6e3:42407 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/WALs/0a737713f6e3,42407,1734035781742/0a737713f6e3%2C42407%2C1734035781742.1734035786343 2024-12-12T20:36:26,567 DEBUG [RS:0;0a737713f6e3:42407 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44403:44403),(127.0.0.1/127.0.0.1:46299:46299),(127.0.0.1/127.0.0.1:43301:43301)] 2024-12-12T20:36:26,596 DEBUG [RS:2;0a737713f6e3:36817 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44403:44403),(127.0.0.1/127.0.0.1:46299:46299),(127.0.0.1/127.0.0.1:43301:43301)] 2024-12-12T20:36:26,609 INFO [RS:1;0a737713f6e3:46859 {}] wal.AbstractFSWAL(841): New WAL 
/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/WALs/0a737713f6e3,46859,1734035782031/0a737713f6e3%2C46859%2C1734035782031.1734035786353 2024-12-12T20:36:26,616 DEBUG [RS:1;0a737713f6e3:46859 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46299:46299),(127.0.0.1/127.0.0.1:43301:43301),(127.0.0.1/127.0.0.1:44403:44403)] 2024-12-12T20:36:26,669 DEBUG [0a737713f6e3:43067 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-12T20:36:26,675 DEBUG [0a737713f6e3:43067 {}] balancer.BalancerClusterState(202): Hosts are {0a737713f6e3=0} racks are {/default-rack=0} 2024-12-12T20:36:26,700 DEBUG [0a737713f6e3:43067 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-12T20:36:26,700 DEBUG [0a737713f6e3:43067 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-12T20:36:26,700 DEBUG [0a737713f6e3:43067 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-12T20:36:26,701 INFO [0a737713f6e3:43067 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-12T20:36:26,701 INFO [0a737713f6e3:43067 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-12T20:36:26,701 INFO [0a737713f6e3:43067 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-12T20:36:26,701 DEBUG [0a737713f6e3:43067 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T20:36:26,710 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:36:26,721 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0a737713f6e3,46859,1734035782031, state=OPENING 2024-12-12T20:36:26,739 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-12T20:36:26,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:26,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:26,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:26,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:26,756 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T20:36:26,756 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T20:36:26,759 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T20:36:26,761 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=0a737713f6e3,46859,1734035782031}] 2024-12-12T20:36:26,762 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T20:36:26,969 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:36:26,976 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-12T20:36:26,998 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50238, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-12T20:36:27,035 INFO [RS_OPEN_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-12T20:36:27,035 INFO [RS_OPEN_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-12T20:36:27,044 INFO [RS_OPEN_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-12T20:36:27,055 INFO [RS_OPEN_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0a737713f6e3%2C46859%2C1734035782031.meta, suffix=.meta, logDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/WALs/0a737713f6e3,46859,1734035782031, archiveDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/oldWALs, maxLogs=32 2024-12-12T20:36:27,095 DEBUG [RS_OPEN_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/WALs/0a737713f6e3,46859,1734035782031/0a737713f6e3%2C46859%2C1734035782031.meta.1734035787058.meta, exclude list is [], retry=0 2024-12-12T20:36:27,119 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36349,DS-75fb073b-e525-4495-a856-9d5b7f6012aa,DISK] 2024-12-12T20:36:27,120 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32781,DS-cecc7eff-c8bb-4519-8f64-9f1b90360e48,DISK] 2024-12-12T20:36:27,127 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41061,DS-79b11db8-af1e-4efe-8dc8-1996e99cfd7a,DISK] 2024-12-12T20:36:27,151 INFO [RS_OPEN_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/WALs/0a737713f6e3,46859,1734035782031/0a737713f6e3%2C46859%2C1734035782031.meta.1734035787058.meta 2024-12-12T20:36:27,159 DEBUG [RS_OPEN_META-regionserver/0a737713f6e3:0-0 
{event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46299:46299),(127.0.0.1/127.0.0.1:44403:44403),(127.0.0.1/127.0.0.1:43301:43301)] 2024-12-12T20:36:27,160 DEBUG [RS_OPEN_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-12T20:36:27,161 DEBUG [RS_OPEN_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-12-12T20:36:27,163 INFO [RS_OPEN_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T20:36:27,165 DEBUG [RS_OPEN_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-12T20:36:27,175 DEBUG [RS_OPEN_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-12T20:36:27,177 INFO [RS_OPEN_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-12T20:36:27,198 DEBUG [RS_OPEN_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-12T20:36:27,199 DEBUG [RS_OPEN_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:36:27,199 DEBUG [RS_OPEN_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-12T20:36:27,199 DEBUG [RS_OPEN_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-12T20:36:27,209 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-12T20:36:27,220 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-12T20:36:27,221 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:36:27,227 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T20:36:27,228 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-12T20:36:27,232 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-12T20:36:27,233 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:36:27,240 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T20:36:27,240 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-12T20:36:27,244 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-12T20:36:27,245 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:36:27,248 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T20:36:27,251 DEBUG [RS_OPEN_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/meta/1588230740 2024-12-12T20:36:27,257 DEBUG [RS_OPEN_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/meta/1588230740 2024-12-12T20:36:27,268 DEBUG [RS_OPEN_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-12T20:36:27,276 DEBUG [RS_OPEN_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-12T20:36:27,282 INFO [RS_OPEN_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68344508, jitterRate=0.018412530422210693}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-12T20:36:27,287 DEBUG [RS_OPEN_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-12T20:36:27,309 INFO [RS_OPEN_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1734035786961 2024-12-12T20:36:27,343 DEBUG [RS_OPEN_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-12T20:36:27,346 INFO [RS_OPEN_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-12T20:36:27,355 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:36:27,365 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0a737713f6e3,46859,1734035782031, state=OPEN 2024-12-12T20:36:27,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-12T20:36:27,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-12T20:36:27,376 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T20:36:27,376 DEBUG 
[zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T20:36:27,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-12T20:36:27,377 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T20:36:27,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-12T20:36:27,379 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T20:36:27,389 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-12T20:36:27,389 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=0a737713f6e3,46859,1734035782031 in 615 msec 2024-12-12T20:36:27,416 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-12T20:36:27,416 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.4030 sec 2024-12-12T20:36:27,440 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 2.3010 sec 2024-12-12T20:36:27,440 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1734035787440, completionTime=-1 2024-12-12T20:36:27,441 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-12T20:36:27,441 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 
2024-12-12T20:36:27,542 DEBUG [hconnection-0x2928eebe-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:36:27,548 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50246, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:36:27,588 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=3 2024-12-12T20:36:27,588 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1734035847588 2024-12-12T20:36:27,588 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1734035907588 2024-12-12T20:36:27,588 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 147 msec 2024-12-12T20:36:27,705 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] balancer.RegionLocationFinder(172): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-12T20:36:27,727 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0a737713f6e3,43067,1734035780320-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T20:36:27,727 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0a737713f6e3,43067,1734035780320-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T20:36:27,728 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0a737713f6e3,43067,1734035780320-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T20:36:27,729 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0a737713f6e3:43067, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T20:36:27,730 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-12T20:36:27,747 DEBUG [master/0a737713f6e3:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-12T20:36:27,821 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-12T20:36:27,823 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-12T20:36:27,842 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-12T20:36:27,851 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T20:36:27,852 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:36:27,856 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T20:36:27,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741837_1013 (size=358) 2024-12-12T20:36:27,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741837_1013 (size=358) 2024-12-12T20:36:27,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741837_1013 (size=358) 2024-12-12T20:36:28,016 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 419ba590b58c1bffeff3e689922ee9a3, NAME => 'hbase:namespace,,1734035787822.419ba590b58c1bffeff3e689922ee9a3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:36:28,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741838_1014 (size=42) 2024-12-12T20:36:28,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741838_1014 (size=42) 2024-12-12T20:36:28,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741838_1014 (size=42) 2024-12-12T20:36:28,101 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734035787822.419ba590b58c1bffeff3e689922ee9a3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:36:28,101 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] 
regionserver.HRegion(1681): Closing 419ba590b58c1bffeff3e689922ee9a3, disabling compactions & flushes 2024-12-12T20:36:28,101 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734035787822.419ba590b58c1bffeff3e689922ee9a3. 2024-12-12T20:36:28,101 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734035787822.419ba590b58c1bffeff3e689922ee9a3. 2024-12-12T20:36:28,101 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734035787822.419ba590b58c1bffeff3e689922ee9a3. after waiting 0 ms 2024-12-12T20:36:28,101 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734035787822.419ba590b58c1bffeff3e689922ee9a3. 2024-12-12T20:36:28,101 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1734035787822.419ba590b58c1bffeff3e689922ee9a3. 2024-12-12T20:36:28,101 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 419ba590b58c1bffeff3e689922ee9a3: 2024-12-12T20:36:28,106 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T20:36:28,114 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1734035787822.419ba590b58c1bffeff3e689922ee9a3.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1734035788107"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734035788107"}]},"ts":"1734035788107"} 2024-12-12T20:36:28,149 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-12T20:36:28,152 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T20:36:28,157 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035788152"}]},"ts":"1734035788152"} 2024-12-12T20:36:28,164 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-12T20:36:28,193 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {0a737713f6e3=0} racks are {/default-rack=0} 2024-12-12T20:36:28,195 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-12T20:36:28,195 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-12T20:36:28,195 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-12T20:36:28,195 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-12T20:36:28,196 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-12T20:36:28,196 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-12T20:36:28,196 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T20:36:28,198 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=419ba590b58c1bffeff3e689922ee9a3, ASSIGN}] 2024-12-12T20:36:28,210 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=419ba590b58c1bffeff3e689922ee9a3, ASSIGN 2024-12-12T20:36:28,217 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=419ba590b58c1bffeff3e689922ee9a3, ASSIGN; state=OFFLINE, location=0a737713f6e3,46859,1734035782031; forceNewPlan=false, retain=false 2024-12-12T20:36:28,372 INFO [0a737713f6e3:43067 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-12T20:36:28,374 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=419ba590b58c1bffeff3e689922ee9a3, regionState=OPENING, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:36:28,386 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 419ba590b58c1bffeff3e689922ee9a3, server=0a737713f6e3,46859,1734035782031}] 2024-12-12T20:36:28,560 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:36:28,590 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1734035787822.419ba590b58c1bffeff3e689922ee9a3. 
2024-12-12T20:36:28,591 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 419ba590b58c1bffeff3e689922ee9a3, NAME => 'hbase:namespace,,1734035787822.419ba590b58c1bffeff3e689922ee9a3.', STARTKEY => '', ENDKEY => ''} 2024-12-12T20:36:28,591 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:namespace,,1734035787822.419ba590b58c1bffeff3e689922ee9a3. service=AccessControlService 2024-12-12T20:36:28,592 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T20:36:28,592 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 419ba590b58c1bffeff3e689922ee9a3 2024-12-12T20:36:28,592 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734035787822.419ba590b58c1bffeff3e689922ee9a3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:36:28,593 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 419ba590b58c1bffeff3e689922ee9a3 2024-12-12T20:36:28,593 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 419ba590b58c1bffeff3e689922ee9a3 2024-12-12T20:36:28,596 INFO [StoreOpener-419ba590b58c1bffeff3e689922ee9a3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 419ba590b58c1bffeff3e689922ee9a3 2024-12-12T20:36:28,603 INFO [StoreOpener-419ba590b58c1bffeff3e689922ee9a3-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 419ba590b58c1bffeff3e689922ee9a3 columnFamilyName info 2024-12-12T20:36:28,603 DEBUG [StoreOpener-419ba590b58c1bffeff3e689922ee9a3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:36:28,604 INFO [StoreOpener-419ba590b58c1bffeff3e689922ee9a3-1 {}] regionserver.HStore(327): Store=419ba590b58c1bffeff3e689922ee9a3/info, memstore type=DefaultMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:36:28,606 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/namespace/419ba590b58c1bffeff3e689922ee9a3 2024-12-12T20:36:28,607 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/namespace/419ba590b58c1bffeff3e689922ee9a3 2024-12-12T20:36:28,612 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 419ba590b58c1bffeff3e689922ee9a3 2024-12-12T20:36:28,618 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/namespace/419ba590b58c1bffeff3e689922ee9a3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:36:28,619 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 419ba590b58c1bffeff3e689922ee9a3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64937106, jitterRate=-0.0323617160320282}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:36:28,621 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 419ba590b58c1bffeff3e689922ee9a3: 2024-12-12T20:36:28,625 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1734035787822.419ba590b58c1bffeff3e689922ee9a3., pid=6, masterSystemTime=1734035788560 2024-12-12T20:36:28,635 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1734035787822.419ba590b58c1bffeff3e689922ee9a3. 2024-12-12T20:36:28,635 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1734035787822.419ba590b58c1bffeff3e689922ee9a3. 
2024-12-12T20:36:28,636 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=419ba590b58c1bffeff3e689922ee9a3, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:36:28,662 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-12T20:36:28,663 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 419ba590b58c1bffeff3e689922ee9a3, server=0a737713f6e3,46859,1734035782031 in 266 msec 2024-12-12T20:36:28,667 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-12T20:36:28,668 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=419ba590b58c1bffeff3e689922ee9a3, ASSIGN in 464 msec 2024-12-12T20:36:28,671 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T20:36:28,671 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035788671"}]},"ts":"1734035788671"} 2024-12-12T20:36:28,686 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-12T20:36:28,708 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T20:36:28,728 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 884 msec 2024-12-12T20:36:28,767 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-12T20:36:28,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:28,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:28,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-12T20:36:28,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:28,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:28,828 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] 
procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-12T20:36:28,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-12T20:36:28,901 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 78 msec 2024-12-12T20:36:28,914 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-12T20:36:28,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-12T20:36:28,991 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 69 msec 2024-12-12T20:36:29,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-12T20:36:29,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-12T20:36:29,058 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 6.384sec 2024-12-12T20:36:29,065 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-12T20:36:29,067 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-12T20:36:29,068 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-12T20:36:29,069 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-12T20:36:29,069 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-12T20:36:29,071 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0a737713f6e3,43067,1734035780320-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-12T20:36:29,072 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0a737713f6e3,43067,1734035780320-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 
2024-12-12T20:36:29,125 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] master.HMaster$4(2389): Client=null/null create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-12T20:36:29,139 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:acl 2024-12-12T20:36:29,144 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T20:36:29,144 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:36:29,144 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] master.MasterRpcServices(713): Client=null/null procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 9 2024-12-12T20:36:29,147 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T20:36:29,152 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T20:36:29,171 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x20f4d7a0 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4afc2cc5 2024-12-12T20:36:29,195 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. 
See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-12T20:36:29,245 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49a69d67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:36:29,254 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-12T20:36:29,255 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-12T20:36:29,255 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T20:36:29,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741839_1015 (size=349) 2024-12-12T20:36:29,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741839_1015 (size=349) 2024-12-12T20:36:29,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741839_1015 (size=349) 2024-12-12T20:36:29,316 DEBUG [hconnection-0x4431ba26-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:36:29,388 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50252, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:36:29,399 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=0a737713f6e3,43067,1734035780320 2024-12-12T20:36:29,399 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2790): Starting mini mapreduce cluster... 
2024-12-12T20:36:29,399 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/test.cache.data in system properties and HBase conf 2024-12-12T20:36:29,399 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop.tmp.dir in system properties and HBase conf 2024-12-12T20:36:29,400 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop.log.dir in system properties and HBase conf 2024-12-12T20:36:29,400 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-12T20:36:29,400 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-12T20:36:29,400 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-12T20:36:29,401 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-12T20:36:29,407 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-12T20:36:29,407 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-12T20:36:29,407 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-12T20:36:29,407 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-12T20:36:29,408 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-12T20:36:29,408 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-12T20:36:29,408 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-12T20:36:29,408 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-12T20:36:29,408 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/nfs.dump.dir in system properties and HBase conf 2024-12-12T20:36:29,408 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/java.io.tmpdir in system properties and HBase conf 2024-12-12T20:36:29,408 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-12T20:36:29,408 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-12T20:36:29,408 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-12T20:36:29,455 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T20:36:29,693 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 039d8ca4d0468f0362e2baff27ee7042, NAME => 'hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY 
=> 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:36:29,755 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T20:36:29,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741840_1016 (size=36) 2024-12-12T20:36:29,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741840_1016 (size=36) 2024-12-12T20:36:29,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741840_1016 (size=36) 2024-12-12T20:36:29,811 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:36:29,811 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1681): Closing 039d8ca4d0468f0362e2baff27ee7042, disabling compactions & flushes 2024-12-12T20:36:29,811 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042. 2024-12-12T20:36:29,811 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042. 2024-12-12T20:36:29,811 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042. after waiting 0 ms 2024-12-12T20:36:29,811 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042. 2024-12-12T20:36:29,811 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1922): Closed hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042. 2024-12-12T20:36:29,811 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1635): Region close journal for 039d8ca4d0468f0362e2baff27ee7042: 2024-12-12T20:36:29,830 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T20:36:29,830 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1734035789830"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734035789830"}]},"ts":"1734035789830"} 2024-12-12T20:36:29,860 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-12T20:36:29,870 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T20:36:29,871 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035789870"}]},"ts":"1734035789870"} 2024-12-12T20:36:29,916 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-12-12T20:36:30,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741841_1017 (size=592039) 2024-12-12T20:36:30,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741841_1017 (size=592039) 2024-12-12T20:36:30,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741841_1017 (size=592039) 2024-12-12T20:36:30,059 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {0a737713f6e3=0} racks are {/default-rack=0} 2024-12-12T20:36:30,062 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-12T20:36:30,062 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-12T20:36:30,063 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-12T20:36:30,063 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-12T20:36:30,063 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-12T20:36:30,063 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-12T20:36:30,063 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T20:36:30,063 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:acl, region=039d8ca4d0468f0362e2baff27ee7042, ASSIGN}] 2024-12-12T20:36:30,067 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:acl, region=039d8ca4d0468f0362e2baff27ee7042, ASSIGN 2024-12-12T20:36:30,070 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:acl, region=039d8ca4d0468f0362e2baff27ee7042, ASSIGN; state=OFFLINE, location=0a737713f6e3,46859,1734035782031; forceNewPlan=false, retain=false 2024-12-12T20:36:30,236 INFO [0a737713f6e3:43067 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-12T20:36:30,237 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=039d8ca4d0468f0362e2baff27ee7042, regionState=OPENING, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:36:30,259 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T20:36:30,263 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 039d8ca4d0468f0362e2baff27ee7042, server=0a737713f6e3,46859,1734035782031}] 2024-12-12T20:36:30,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741842_1018 (size=1663647) 2024-12-12T20:36:30,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741842_1018 (size=1663647) 2024-12-12T20:36:30,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741842_1018 (size=1663647) 2024-12-12T20:36:30,435 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:36:30,461 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] handler.AssignRegionHandler(135): Open hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042. 2024-12-12T20:36:30,462 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 039d8ca4d0468f0362e2baff27ee7042, NAME => 'hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042.', STARTKEY => '', ENDKEY => ''} 2024-12-12T20:36:30,462 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042. service=AccessControlService 2024-12-12T20:36:30,462 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
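Each region open above registers the AccessController coprocessor and exposes its AccessControlService endpoint. A rough sketch of the configuration that wires this in, assuming the standard property keys (the values shown are illustrative, not read back from this test harness):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class AclWiringSketch {
      // Builds a Configuration with the usual AccessController switches enabled.
      public static Configuration secured() {
        Configuration conf = HBaseConfiguration.create();
        String ac = "org.apache.hadoop.hbase.security.access.AccessController";
        conf.setBoolean("hbase.security.authorization", true);
        conf.set("hbase.coprocessor.master.classes", ac);
        conf.set("hbase.coprocessor.region.classes", ac);
        conf.set("hbase.coprocessor.regionserver.classes", ac);
        return conf;
      }
    }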
2024-12-12T20:36:30,462 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 039d8ca4d0468f0362e2baff27ee7042 2024-12-12T20:36:30,463 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(894): Instantiated hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:36:30,463 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 039d8ca4d0468f0362e2baff27ee7042 2024-12-12T20:36:30,463 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 039d8ca4d0468f0362e2baff27ee7042 2024-12-12T20:36:30,476 INFO [StoreOpener-039d8ca4d0468f0362e2baff27ee7042-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 039d8ca4d0468f0362e2baff27ee7042 2024-12-12T20:36:30,479 INFO [StoreOpener-039d8ca4d0468f0362e2baff27ee7042-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 039d8ca4d0468f0362e2baff27ee7042 columnFamilyName l 2024-12-12T20:36:30,480 DEBUG [StoreOpener-039d8ca4d0468f0362e2baff27ee7042-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:36:30,483 INFO [StoreOpener-039d8ca4d0468f0362e2baff27ee7042-1 {}] regionserver.HStore(327): Store=039d8ca4d0468f0362e2baff27ee7042/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:36:30,486 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/acl/039d8ca4d0468f0362e2baff27ee7042 2024-12-12T20:36:30,487 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/acl/039d8ca4d0468f0362e2baff27ee7042 2024-12-12T20:36:30,500 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] 
regionserver.HRegion(1085): writing seq id for 039d8ca4d0468f0362e2baff27ee7042 2024-12-12T20:36:30,508 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/acl/039d8ca4d0468f0362e2baff27ee7042/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:36:30,511 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(1102): Opened 039d8ca4d0468f0362e2baff27ee7042; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62368216, jitterRate=-0.0706411600112915}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:36:30,513 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 039d8ca4d0468f0362e2baff27ee7042: 2024-12-12T20:36:30,516 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042., pid=11, masterSystemTime=1734035790435 2024-12-12T20:36:30,534 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042. 2024-12-12T20:36:30,534 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] handler.AssignRegionHandler(164): Opened hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042. 2024-12-12T20:36:30,540 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=039d8ca4d0468f0362e2baff27ee7042, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:36:30,546 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43067 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=OPEN, location=0a737713f6e3,46859,1734035782031, table=hbase:acl, region=039d8ca4d0468f0362e2baff27ee7042. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-12T20:36:30,601 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-12T20:36:30,612 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 039d8ca4d0468f0362e2baff27ee7042, server=0a737713f6e3,46859,1734035782031 in 289 msec 2024-12-12T20:36:30,628 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-12T20:36:30,628 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=hbase:acl, region=039d8ca4d0468f0362e2baff27ee7042, ASSIGN in 538 msec 2024-12-12T20:36:30,632 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T20:36:30,633 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035790632"}]},"ts":"1734035790632"} 2024-12-12T20:36:30,652 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-12-12T20:36:30,691 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T20:36:30,731 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=hbase:acl in 1.5670 sec 2024-12-12T20:36:31,130 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:36:31,131 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-12-12T20:36:31,132 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-12T20:36:31,132 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-12T20:36:31,166 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:36:31,166 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-12-12T20:36:31,167 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-12T20:36:31,167 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-12T20:36:31,169 DEBUG [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-12T20:36:31,169 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-12T20:36:31,180 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-12T20:36:31,180 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-12-12T20:36:31,183 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:36:31,183 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-12-12T20:36:31,184 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-12T20:36:31,184 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-12-12T20:36:31,184 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-12T20:36:31,184 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-12T20:36:31,263 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T20:36:31,263 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: hbase:acl, procId: 9 completed 2024-12-12T20:36:31,359 DEBUG [master/0a737713f6e3:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-12T20:36:31,360 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-12T20:36:31,361 INFO [master/0a737713f6e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0a737713f6e3,43067,1734035780320-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-12T20:36:31,872 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-12T20:36:31,875 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-12T20:36:31,878 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-12-12T20:36:32,768 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-resourcemanager.properties,hadoop-metrics2.properties 2024-12-12T20:36:34,893 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T20:36:35,294 WARN [Thread-401 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T20:36:36,000 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-12T20:36:36,010 WARN [Thread-401 {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-12T20:36:36,012 INFO [Thread-401 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-12T20:36:36,054 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-12T20:36:36,054 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-12T20:36:36,054 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-12T20:36:36,076 INFO [Thread-401 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-12T20:36:36,076 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T20:36:36,076 INFO [Thread-401 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-12T20:36:36,076 INFO [Thread-401 {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-12T20:36:36,086 INFO [Thread-401 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@cce8bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop.log.dir/,AVAILABLE} 2024-12-12T20:36:36,086 INFO [Thread-401 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e2866a2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-12T20:36:36,100 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5195b339{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop.log.dir/,AVAILABLE} 2024-12-12T20:36:36,100 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2e0bbdd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-12T20:36:36,420 INFO [Thread-401 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebServices as a root resource class 2024-12-12T20:36:36,421 INFO [Thread-401 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.mapreduce.v2.hs.webapp.JAXBContextResolver as a provider class 2024-12-12T20:36:36,421 INFO [Thread-401 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-12T20:36:36,432 INFO [Thread-401 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-12T20:36:36,545 INFO [Thread-401 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.mapreduce.v2.hs.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-12T20:36:37,076 INFO [Thread-401 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-12T20:36:37,677 INFO [Thread-401 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebServices to GuiceManagedComponentProvider with the scope "PerRequest" 2024-12-12T20:36:37,717 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@158883e3{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/java.io.tmpdir/jetty-localhost-36933-hadoop-yarn-common-3_4_1_jar-_-any-9610392975635246200/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-12T20:36:37,719 INFO [Thread-401 {}] 
handler.ContextHandler(921): Started o.e.j.w.WebAppContext@46f85e31{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/java.io.tmpdir/jetty-localhost-36407-hadoop-yarn-common-3_4_1_jar-_-any-6299466015832113771/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-12T20:36:37,720 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@614a7d80{HTTP/1.1, (http/1.1)}{localhost:36933} 2024-12-12T20:36:37,720 INFO [Time-limited test {}] server.Server(415): Started @28784ms 2024-12-12T20:36:37,731 INFO [Thread-401 {}] server.AbstractConnector(333): Started ServerConnector@53ed644b{HTTP/1.1, (http/1.1)}{localhost:36407} 2024-12-12T20:36:37,731 INFO [Thread-401 {}] server.Server(415): Started @28796ms 2024-12-12T20:36:38,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741843_1019 (size=5) 2024-12-12T20:36:38,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741843_1019 (size=5) 2024-12-12T20:36:38,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741843_1019 (size=5) 2024-12-12T20:36:40,799 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-12T20:36:40,816 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T20:36:40,857 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-12T20:36:40,858 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-12T20:36:40,936 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-12T20:36:40,936 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-12T20:36:40,936 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-12T20:36:40,951 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T20:36:40,954 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a3ef6b3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop.log.dir/,AVAILABLE} 2024-12-12T20:36:40,955 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65442ab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-12T20:36:41,083 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-12T20:36:41,083 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-12T20:36:41,084 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-12T20:36:41,084 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-12T20:36:41,139 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-12T20:36:41,209 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-12T20:36:41,585 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-12T20:36:41,625 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@bd830f5{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/java.io.tmpdir/jetty-localhost-45935-hadoop-yarn-common-3_4_1_jar-_-any-13489744733624403529/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-12T20:36:41,629 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4afb2ea7{HTTP/1.1, (http/1.1)}{localhost:45935} 2024-12-12T20:36:41,629 INFO [Time-limited test {}] server.Server(415): Started @32694ms 2024-12-12T20:36:42,363 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-12T20:36:42,369 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T20:36:42,405 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. 
This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-12T20:36:42,406 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-12T20:36:42,427 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-12T20:36:42,427 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-12T20:36:42,428 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-12T20:36:42,432 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T20:36:42,436 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c3310d9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop.log.dir/,AVAILABLE} 2024-12-12T20:36:42,438 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4d43ef08{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-12T20:36:42,497 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-12T20:36:42,497 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-12T20:36:42,497 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-12T20:36:42,497 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-12T20:36:42,507 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-12T20:36:42,515 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-12T20:36:42,663 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-12T20:36:42,671 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@1f060b3e{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/java.io.tmpdir/jetty-localhost-39579-hadoop-yarn-common-3_4_1_jar-_-any-15085463378190354710/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-12T20:36:42,672 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1dd43c4{HTTP/1.1, (http/1.1)}{localhost:39579} 2024-12-12T20:36:42,672 INFO [Time-limited test {}] server.Server(415): Started @33737ms 2024-12-12T20:36:42,721 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2825): Mini mapreduce cluster started 2024-12-12T20:36:42,722 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:36:42,768 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=716, OpenFileDescriptor=771, MaxFileDescriptor=1048576, SystemLoadAverage=1447, ProcessCount=11, AvailableMemoryMB=3868 2024-12-12T20:36:42,768 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=716 is superior to 500 2024-12-12T20:36:42,779 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T20:36:42,783 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36908, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T20:36:42,794 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T20:36:42,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithTargetName 2024-12-12T20:36:42,799 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T20:36:42,800 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 12 2024-12-12T20:36:42,800 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:36:42,803 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T20:36:42,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T20:36:42,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741844_1020 (size=406) 2024-12-12T20:36:42,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741844_1020 (size=406) 2024-12-12T20:36:42,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741844_1020 (size=406) 2024-12-12T20:36:42,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T20:36:43,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T20:36:43,268 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 61ad8a9b414ee40aeebf5ddfbd8281f4, NAME => 'testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:36:43,284 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 3966f962b82fb93920b9fa4a5587af0e, NAME => 'testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:36:43,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741845_1021 (size=67) 2024-12-12T20:36:43,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741845_1021 (size=67) 2024-12-12T20:36:43,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741845_1021 (size=67) 2024-12-12T20:36:43,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741846_1022 (size=67) 2024-12-12T20:36:43,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741846_1022 (size=67) 
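The create request logged at 20:36:42,794 carries a single 'cf' family with VERSIONS => '1' and BLOCKSIZE => '65536', and the two regions instantiated afterwards ('' .. '1' and '1' .. '') imply one split key of '1'. A comparable request expressed against the Java Admin API might look like the sketch below (purely illustrative; the test builds its own descriptor):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SplitTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportWithTargetName"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)        // VERSIONS => '1'
                  .setBlocksize(65536)      // BLOCKSIZE => '65536'
                  .build())
              .build();
          // One split key of '1' produces the two regions seen in the log:
          // ('' .. '1') and ('1' .. '').
          admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }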
2024-12-12T20:36:43,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741846_1022 (size=67) 2024-12-12T20:36:43,429 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:36:43,430 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1681): Closing 61ad8a9b414ee40aeebf5ddfbd8281f4, disabling compactions & flushes 2024-12-12T20:36:43,430 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4. 2024-12-12T20:36:43,430 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4. 2024-12-12T20:36:43,430 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4. after waiting 0 ms 2024-12-12T20:36:43,430 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4. 2024-12-12T20:36:43,430 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4. 2024-12-12T20:36:43,430 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1635): Region close journal for 61ad8a9b414ee40aeebf5ddfbd8281f4: 2024-12-12T20:36:43,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T20:36:43,783 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:36:43,783 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1681): Closing 3966f962b82fb93920b9fa4a5587af0e, disabling compactions & flushes 2024-12-12T20:36:43,783 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e. 2024-12-12T20:36:43,783 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e. 2024-12-12T20:36:43,783 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e. 
after waiting 0 ms 2024-12-12T20:36:43,783 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e. 2024-12-12T20:36:43,783 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e. 2024-12-12T20:36:43,783 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1635): Region close journal for 3966f962b82fb93920b9fa4a5587af0e: 2024-12-12T20:36:43,785 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T20:36:43,786 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1734035803785"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734035803785"}]},"ts":"1734035803785"} 2024-12-12T20:36:43,786 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1734035803785"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734035803785"}]},"ts":"1734035803785"} 2024-12-12T20:36:43,830 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-12T20:36:43,833 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T20:36:43,833 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035803833"}]},"ts":"1734035803833"} 2024-12-12T20:36:43,840 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-12-12T20:36:43,857 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {0a737713f6e3=0} racks are {/default-rack=0} 2024-12-12T20:36:43,859 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-12T20:36:43,859 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-12T20:36:43,859 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-12T20:36:43,859 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-12T20:36:43,859 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-12T20:36:43,859 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-12T20:36:43,859 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T20:36:43,860 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, 
region=61ad8a9b414ee40aeebf5ddfbd8281f4, ASSIGN}, {pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=3966f962b82fb93920b9fa4a5587af0e, ASSIGN}] 2024-12-12T20:36:43,862 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=3966f962b82fb93920b9fa4a5587af0e, ASSIGN 2024-12-12T20:36:43,862 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=61ad8a9b414ee40aeebf5ddfbd8281f4, ASSIGN 2024-12-12T20:36:43,864 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=3966f962b82fb93920b9fa4a5587af0e, ASSIGN; state=OFFLINE, location=0a737713f6e3,42407,1734035781742; forceNewPlan=false, retain=false 2024-12-12T20:36:43,864 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=61ad8a9b414ee40aeebf5ddfbd8281f4, ASSIGN; state=OFFLINE, location=0a737713f6e3,36817,1734035782356; forceNewPlan=false, retain=false 2024-12-12T20:36:43,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T20:36:44,014 INFO [0a737713f6e3:43067 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 
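Once the balancer has placed the two regions and the TransitRegionStateProcedures move them through OPENING, hbase:meta is updated with a regionLocation for each. The placement that ends up in meta can be read back from a client roughly as in this sketch (server names will differ per run):

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionPlacementSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("testtb-testExportWithTargetName"))) {
          // Each HRegionLocation pairs an encoded region name with the server
          // hosting it, i.e. the regionLocation values written to hbase:meta.
          List<HRegionLocation> locations = locator.getAllRegionLocations();
          for (HRegionLocation loc : locations) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }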
2024-12-12T20:36:44,015 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=14 updating hbase:meta row=3966f962b82fb93920b9fa4a5587af0e, regionState=OPENING, regionLocation=0a737713f6e3,42407,1734035781742 2024-12-12T20:36:44,015 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=61ad8a9b414ee40aeebf5ddfbd8281f4, regionState=OPENING, regionLocation=0a737713f6e3,36817,1734035782356 2024-12-12T20:36:44,018 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=13, state=RUNNABLE; OpenRegionProcedure 61ad8a9b414ee40aeebf5ddfbd8281f4, server=0a737713f6e3,36817,1734035782356}] 2024-12-12T20:36:44,023 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=16, ppid=14, state=RUNNABLE; OpenRegionProcedure 3966f962b82fb93920b9fa4a5587af0e, server=0a737713f6e3,42407,1734035781742}] 2024-12-12T20:36:44,178 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:36:44,178 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-12T20:36:44,182 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:36:44,182 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-12T20:36:44,207 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50786, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-12T20:36:44,211 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37112, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-12T20:36:44,275 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] handler.AssignRegionHandler(135): Open testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e. 2024-12-12T20:36:44,276 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7285): Opening region: {ENCODED => 3966f962b82fb93920b9fa4a5587af0e, NAME => 'testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e.', STARTKEY => '1', ENDKEY => ''} 2024-12-12T20:36:44,277 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e. service=AccessControlService 2024-12-12T20:36:44,277 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-12T20:36:44,278 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 3966f962b82fb93920b9fa4a5587af0e 2024-12-12T20:36:44,278 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:36:44,278 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7327): checking encryption for 3966f962b82fb93920b9fa4a5587af0e 2024-12-12T20:36:44,278 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7330): checking classloading for 3966f962b82fb93920b9fa4a5587af0e 2024-12-12T20:36:44,283 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(135): Open testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4. 2024-12-12T20:36:44,284 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7285): Opening region: {ENCODED => 61ad8a9b414ee40aeebf5ddfbd8281f4, NAME => 'testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4.', STARTKEY => '', ENDKEY => '1'} 2024-12-12T20:36:44,284 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4. service=AccessControlService 2024-12-12T20:36:44,284 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
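Both region opens above attach the AccessController coprocessor, and further down PermissionStorage records the owner's RWXCA entry for the new table in hbase:acl. A comparable grant issued through the public client API would look roughly like this sketch (grantee and actions are placeholder assumptions, not values from the test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantSketch {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // Writes an ACL entry into the hbase:acl table through the
          // AccessController endpoint; null family/qualifier covers the whole table.
          AccessControlClient.grant(conn,
              TableName.valueOf("testtb-testExportWithTargetName"),
              "some_user",                               // placeholder grantee
              null, null,
              Permission.Action.READ, Permission.Action.WRITE);
        }
      }
    }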
2024-12-12T20:36:44,285 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 61ad8a9b414ee40aeebf5ddfbd8281f4 2024-12-12T20:36:44,285 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:36:44,285 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7327): checking encryption for 61ad8a9b414ee40aeebf5ddfbd8281f4 2024-12-12T20:36:44,285 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7330): checking classloading for 61ad8a9b414ee40aeebf5ddfbd8281f4 2024-12-12T20:36:44,296 INFO [StoreOpener-3966f962b82fb93920b9fa4a5587af0e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3966f962b82fb93920b9fa4a5587af0e 2024-12-12T20:36:44,300 INFO [StoreOpener-3966f962b82fb93920b9fa4a5587af0e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3966f962b82fb93920b9fa4a5587af0e columnFamilyName cf 2024-12-12T20:36:44,300 DEBUG [StoreOpener-3966f962b82fb93920b9fa4a5587af0e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:36:44,301 INFO [StoreOpener-3966f962b82fb93920b9fa4a5587af0e-1 {}] regionserver.HStore(327): Store=3966f962b82fb93920b9fa4a5587af0e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:36:44,302 INFO [StoreOpener-61ad8a9b414ee40aeebf5ddfbd8281f4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 61ad8a9b414ee40aeebf5ddfbd8281f4 2024-12-12T20:36:44,303 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/3966f962b82fb93920b9fa4a5587af0e 2024-12-12T20:36:44,305 INFO [StoreOpener-61ad8a9b414ee40aeebf5ddfbd8281f4-1 {}] compactions.CompactionConfiguration(181): 
size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 61ad8a9b414ee40aeebf5ddfbd8281f4 columnFamilyName cf 2024-12-12T20:36:44,305 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/3966f962b82fb93920b9fa4a5587af0e 2024-12-12T20:36:44,306 DEBUG [StoreOpener-61ad8a9b414ee40aeebf5ddfbd8281f4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:36:44,307 INFO [StoreOpener-61ad8a9b414ee40aeebf5ddfbd8281f4-1 {}] regionserver.HStore(327): Store=61ad8a9b414ee40aeebf5ddfbd8281f4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:36:44,309 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/61ad8a9b414ee40aeebf5ddfbd8281f4 2024-12-12T20:36:44,311 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/61ad8a9b414ee40aeebf5ddfbd8281f4 2024-12-12T20:36:44,313 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1085): writing seq id for 3966f962b82fb93920b9fa4a5587af0e 2024-12-12T20:36:44,315 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1085): writing seq id for 61ad8a9b414ee40aeebf5ddfbd8281f4 2024-12-12T20:36:44,337 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/3966f962b82fb93920b9fa4a5587af0e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:36:44,344 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1102): Opened 3966f962b82fb93920b9fa4a5587af0e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60864126, jitterRate=-0.09305384755134583}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:36:44,345 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] 
regionserver.HRegion(1001): Region open journal for 3966f962b82fb93920b9fa4a5587af0e: 2024-12-12T20:36:44,351 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/61ad8a9b414ee40aeebf5ddfbd8281f4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:36:44,352 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e., pid=16, masterSystemTime=1734035804182 2024-12-12T20:36:44,353 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1102): Opened 61ad8a9b414ee40aeebf5ddfbd8281f4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63753323, jitterRate=-0.05000145733356476}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:36:44,353 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1001): Region open journal for 61ad8a9b414ee40aeebf5ddfbd8281f4: 2024-12-12T20:36:44,357 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e. 2024-12-12T20:36:44,357 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] handler.AssignRegionHandler(164): Opened testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e. 2024-12-12T20:36:44,358 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4., pid=15, masterSystemTime=1734035804178 2024-12-12T20:36:44,361 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=14 updating hbase:meta row=3966f962b82fb93920b9fa4a5587af0e, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,42407,1734035781742 2024-12-12T20:36:44,365 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4. 2024-12-12T20:36:44,365 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(164): Opened testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4. 
2024-12-12T20:36:44,366 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=61ad8a9b414ee40aeebf5ddfbd8281f4, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,36817,1734035782356 2024-12-12T20:36:44,375 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=16, resume processing ppid=14 2024-12-12T20:36:44,390 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, ppid=14, state=SUCCESS; OpenRegionProcedure 3966f962b82fb93920b9fa4a5587af0e, server=0a737713f6e3,42407,1734035781742 in 348 msec 2024-12-12T20:36:44,394 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=13 2024-12-12T20:36:44,403 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=3966f962b82fb93920b9fa4a5587af0e, ASSIGN in 523 msec 2024-12-12T20:36:44,403 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=13, state=SUCCESS; OpenRegionProcedure 61ad8a9b414ee40aeebf5ddfbd8281f4, server=0a737713f6e3,36817,1734035782356 in 356 msec 2024-12-12T20:36:44,408 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-12T20:36:44,408 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=61ad8a9b414ee40aeebf5ddfbd8281f4, ASSIGN in 535 msec 2024-12-12T20:36:44,414 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T20:36:44,414 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035804414"}]},"ts":"1734035804414"} 2024-12-12T20:36:44,420 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-12-12T20:36:44,435 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T20:36:44,440 DEBUG [PEWorker-5 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-12-12T20:36:44,489 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46859 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-12T20:36:44,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-12T20:36:44,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-12T20:36:44,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-12T20:36:44,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:44,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:44,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:44,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-12T20:36:44,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:36:44,599 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-12T20:36:44,599 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-12T20:36:44,600 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-12T20:36:44,604 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithTargetName in 1.8030 sec 2024-12-12T20:36:44,607 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-12T20:36:44,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T20:36:44,951 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName, procId: 12 completed 2024-12-12T20:36:44,951 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithTargetName get assigned. Timeout = 60000ms 2024-12-12T20:36:44,952 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:36:44,958 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithTargetName assigned to meta. Checking AM states. 
2024-12-12T20:36:44,959 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:36:44,959 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithTargetName assigned. 2024-12-12T20:36:44,977 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-12T20:36:44,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734035804977 (current time:1734035804977). 2024-12-12T20:36:44,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T20:36:44,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-12T20:36:44,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-12T20:36:44,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x243b0bd7 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2ef26fa4 2024-12-12T20:36:44,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a02aef6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:36:44,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:36:44,994 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52098, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:36:44,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x243b0bd7 to 127.0.0.1:52655 2024-12-12T20:36:44,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:36:44,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0bdbe9ef to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@90625b4 2024-12-12T20:36:45,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17549768, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:36:45,019 DEBUG [hconnection-0x28031a9d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-12-12T20:36:45,020 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52114, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:36:45,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0bdbe9ef to 127.0.0.1:52655 2024-12-12T20:36:45,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:36:45,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-12T20:36:45,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-12T20:36:45,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=17, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-12T20:36:45,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 17 2024-12-12T20:36:45,048 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T20:36:45,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-12T20:36:45,055 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T20:36:45,072 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T20:36:45,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741847_1023 (size=167) 2024-12-12T20:36:45,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741847_1023 (size=167) 2024-12-12T20:36:45,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741847_1023 (size=167) 2024-12-12T20:36:45,111 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ 
ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T20:36:45,114 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 61ad8a9b414ee40aeebf5ddfbd8281f4}, {pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 3966f962b82fb93920b9fa4a5587af0e}] 2024-12-12T20:36:45,120 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 61ad8a9b414ee40aeebf5ddfbd8281f4 2024-12-12T20:36:45,120 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 3966f962b82fb93920b9fa4a5587af0e 2024-12-12T20:36:45,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-12T20:36:45,288 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:36:45,290 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:36:45,294 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36817 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=18 2024-12-12T20:36:45,295 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42407 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=19 2024-12-12T20:36:45,299 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4. 2024-12-12T20:36:45,302 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.HRegion(2538): Flush status journal for 61ad8a9b414ee40aeebf5ddfbd8281f4: 2024-12-12T20:36:45,302 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4. for emptySnaptb0-testExportWithTargetName completed. 2024-12-12T20:36:45,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-12T20:36:45,311 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e. 
2024-12-12T20:36:45,312 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 3966f962b82fb93920b9fa4a5587af0e: 2024-12-12T20:36:45,312 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e. for emptySnaptb0-testExportWithTargetName completed. 2024-12-12T20:36:45,312 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-12T20:36:45,314 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:36:45,314 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:36:45,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T20:36:45,319 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T20:36:45,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-12T20:36:45,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741849_1025 (size=70) 2024-12-12T20:36:45,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741849_1025 (size=70) 2024-12-12T20:36:45,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741849_1025 (size=70) 2024-12-12T20:36:45,373 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e. 
2024-12-12T20:36:45,376 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-12T20:36:45,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-12T20:36:45,379 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 3966f962b82fb93920b9fa4a5587af0e 2024-12-12T20:36:45,380 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 3966f962b82fb93920b9fa4a5587af0e 2024-12-12T20:36:45,391 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=17, state=SUCCESS; SnapshotRegionProcedure 3966f962b82fb93920b9fa4a5587af0e in 268 msec 2024-12-12T20:36:45,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741848_1024 (size=70) 2024-12-12T20:36:45,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741848_1024 (size=70) 2024-12-12T20:36:45,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741848_1024 (size=70) 2024-12-12T20:36:45,417 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4. 2024-12-12T20:36:45,417 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=18 2024-12-12T20:36:45,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=18 2024-12-12T20:36:45,419 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 61ad8a9b414ee40aeebf5ddfbd8281f4 2024-12-12T20:36:45,419 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 61ad8a9b414ee40aeebf5ddfbd8281f4 2024-12-12T20:36:45,446 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=18, resume processing ppid=17 2024-12-12T20:36:45,446 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, ppid=17, state=SUCCESS; SnapshotRegionProcedure 61ad8a9b414ee40aeebf5ddfbd8281f4 in 317 msec 2024-12-12T20:36:45,446 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T20:36:45,450 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName 
type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T20:36:45,457 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T20:36:45,457 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-12-12T20:36:45,469 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-12-12T20:36:45,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741850_1026 (size=549) 2024-12-12T20:36:45,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741850_1026 (size=549) 2024-12-12T20:36:45,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741850_1026 (size=549) 2024-12-12T20:36:45,652 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T20:36:45,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-12T20:36:45,693 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T20:36:45,703 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-12-12T20:36:45,714 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T20:36:45,715 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 17 2024-12-12T20:36:45,725 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 673 msec 
2024-12-12T20:36:46,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-12T20:36:46,175 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName, procId: 17 completed 2024-12-12T20:36:46,200 DEBUG [htable-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:36:46,201 DEBUG [htable-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:36:46,203 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37116, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:36:46,204 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36817 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T20:36:46,206 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50798, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:36:46,210 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42407 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T20:36:46,223 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithTargetName 2024-12-12T20:36:46,224 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4. 2024-12-12T20:36:46,225 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:36:46,271 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-12T20:36:46,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734035806271 (current time:1734035806271). 
2024-12-12T20:36:46,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T20:36:46,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-12T20:36:46,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-12T20:36:46,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x625288e8 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4ed7a62b 2024-12-12T20:36:46,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30b7a2fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:36:46,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:36:46,420 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52124, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:36:46,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x625288e8 to 127.0.0.1:52655 2024-12-12T20:36:46,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:36:46,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x77845722 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4114107 2024-12-12T20:36:46,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@93ca33b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:36:46,592 DEBUG [hconnection-0x3437eed8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:36:46,594 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52132, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:36:46,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x77845722 to 127.0.0.1:52655 2024-12-12T20:36:46,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:36:46,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-12T20:36:46,601 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-12T20:36:46,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-12T20:36:46,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 20 2024-12-12T20:36:46,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-12T20:36:46,604 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T20:36:46,606 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T20:36:46,609 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T20:36:46,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741851_1027 (size=162) 2024-12-12T20:36:46,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741851_1027 (size=162) 2024-12-12T20:36:46,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741851_1027 (size=162) 2024-12-12T20:36:46,628 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T20:36:46,630 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 61ad8a9b414ee40aeebf5ddfbd8281f4}, {pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 3966f962b82fb93920b9fa4a5587af0e}] 2024-12-12T20:36:46,637 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 61ad8a9b414ee40aeebf5ddfbd8281f4 2024-12-12T20:36:46,637 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 3966f962b82fb93920b9fa4a5587af0e 
2024-12-12T20:36:46,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-12T20:36:46,789 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:36:46,791 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:36:46,791 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36817 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=21 2024-12-12T20:36:46,792 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42407 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=22 2024-12-12T20:36:46,792 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4. 2024-12-12T20:36:46,793 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 61ad8a9b414ee40aeebf5ddfbd8281f4 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-12T20:36:46,794 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e. 2024-12-12T20:36:46,795 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(2837): Flushing 3966f962b82fb93920b9fa4a5587af0e 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-12T20:36:46,895 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/61ad8a9b414ee40aeebf5ddfbd8281f4/.tmp/cf/9e937bfdd1744227abaa48f44cc73f63 is 71, key is 05b9c2de189542e405adbbdb30b53739/cf:q/1734035806204/Put/seqid=0 2024-12-12T20:36:46,904 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/3966f962b82fb93920b9fa4a5587af0e/.tmp/cf/fbd9caf873da487e9b6b5aafd38a106b is 71, key is 13c03cf1a9aeff0340323855d3e07b7e/cf:q/1734035806209/Put/seqid=0 2024-12-12T20:36:46,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-12T20:36:47,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741852_1028 (size=8258) 2024-12-12T20:36:47,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741852_1028 (size=8258) 2024-12-12T20:36:47,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36349 is added to blk_1073741852_1028 (size=8258) 2024-12-12T20:36:47,025 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/3966f962b82fb93920b9fa4a5587af0e/.tmp/cf/fbd9caf873da487e9b6b5aafd38a106b 2024-12-12T20:36:47,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741853_1029 (size=5354) 2024-12-12T20:36:47,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741853_1029 (size=5354) 2024-12-12T20:36:47,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741853_1029 (size=5354) 2024-12-12T20:36:47,053 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/61ad8a9b414ee40aeebf5ddfbd8281f4/.tmp/cf/9e937bfdd1744227abaa48f44cc73f63 2024-12-12T20:36:47,203 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/3966f962b82fb93920b9fa4a5587af0e/.tmp/cf/fbd9caf873da487e9b6b5aafd38a106b as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/3966f962b82fb93920b9fa4a5587af0e/cf/fbd9caf873da487e9b6b5aafd38a106b 2024-12-12T20:36:47,203 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/61ad8a9b414ee40aeebf5ddfbd8281f4/.tmp/cf/9e937bfdd1744227abaa48f44cc73f63 as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/61ad8a9b414ee40aeebf5ddfbd8281f4/cf/9e937bfdd1744227abaa48f44cc73f63 2024-12-12T20:36:47,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-12T20:36:47,242 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/61ad8a9b414ee40aeebf5ddfbd8281f4/cf/9e937bfdd1744227abaa48f44cc73f63, entries=4, sequenceid=6, filesize=5.2 K 2024-12-12T20:36:47,258 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/3966f962b82fb93920b9fa4a5587af0e/cf/fbd9caf873da487e9b6b5aafd38a106b, entries=46, sequenceid=6, filesize=8.1 K 2024-12-12T20:36:47,263 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 61ad8a9b414ee40aeebf5ddfbd8281f4 in 470ms, sequenceid=6, compaction requested=false 2024-12-12T20:36:47,264 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-12-12T20:36:47,265 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 61ad8a9b414ee40aeebf5ddfbd8281f4: 2024-12-12T20:36:47,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4. for snaptb0-testExportWithTargetName completed. 2024-12-12T20:36:47,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-12T20:36:47,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:36:47,267 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/61ad8a9b414ee40aeebf5ddfbd8281f4/cf/9e937bfdd1744227abaa48f44cc73f63] hfiles 2024-12-12T20:36:47,267 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/61ad8a9b414ee40aeebf5ddfbd8281f4/cf/9e937bfdd1744227abaa48f44cc73f63 for snapshot=snaptb0-testExportWithTargetName 2024-12-12T20:36:47,272 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(3040): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 3966f962b82fb93920b9fa4a5587af0e in 477ms, sequenceid=6, compaction requested=false 2024-12-12T20:36:47,272 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(2538): Flush status journal for 3966f962b82fb93920b9fa4a5587af0e: 2024-12-12T20:36:47,272 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e. for snaptb0-testExportWithTargetName completed. 
2024-12-12T20:36:47,272 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-12T20:36:47,272 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:36:47,272 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/3966f962b82fb93920b9fa4a5587af0e/cf/fbd9caf873da487e9b6b5aafd38a106b] hfiles 2024-12-12T20:36:47,272 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/3966f962b82fb93920b9fa4a5587af0e/cf/fbd9caf873da487e9b6b5aafd38a106b for snapshot=snaptb0-testExportWithTargetName 2024-12-12T20:36:47,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741854_1030 (size=109) 2024-12-12T20:36:47,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741854_1030 (size=109) 2024-12-12T20:36:47,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741854_1030 (size=109) 2024-12-12T20:36:47,422 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e. 
2024-12-12T20:36:47,422 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=22 2024-12-12T20:36:47,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=22 2024-12-12T20:36:47,424 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 3966f962b82fb93920b9fa4a5587af0e 2024-12-12T20:36:47,424 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 3966f962b82fb93920b9fa4a5587af0e 2024-12-12T20:36:47,439 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, ppid=20, state=SUCCESS; SnapshotRegionProcedure 3966f962b82fb93920b9fa4a5587af0e in 802 msec 2024-12-12T20:36:47,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741855_1031 (size=109) 2024-12-12T20:36:47,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741855_1031 (size=109) 2024-12-12T20:36:47,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741855_1031 (size=109) 2024-12-12T20:36:47,465 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4. 2024-12-12T20:36:47,465 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-12T20:36:47,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-12T20:36:47,466 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 61ad8a9b414ee40aeebf5ddfbd8281f4 2024-12-12T20:36:47,466 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 61ad8a9b414ee40aeebf5ddfbd8281f4 2024-12-12T20:36:47,479 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-12T20:36:47,480 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; SnapshotRegionProcedure 61ad8a9b414ee40aeebf5ddfbd8281f4 in 840 msec 2024-12-12T20:36:47,480 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T20:36:47,488 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 
execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T20:36:47,489 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T20:36:47,490 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-12-12T20:36:47,491 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-12-12T20:36:47,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741856_1032 (size=627) 2024-12-12T20:36:47,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741856_1032 (size=627) 2024-12-12T20:36:47,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741856_1032 (size=627) 2024-12-12T20:36:47,588 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T20:36:47,624 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T20:36:47,628 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-12T20:36:47,632 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T20:36:47,632 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 20 2024-12-12T20:36:47,640 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 1.0310 sec 2024-12-12T20:36:47,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-12T20:36:47,721 INFO [Time-limited test {}] 
client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName, procId: 20 completed 2024-12-12T20:36:47,721 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035807721 2024-12-12T20:36:47,721 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:37615, tgtDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035807721, rawTgtDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035807721, srcFsUri=hdfs://localhost:37615, srcDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:36:47,765 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:37615, inputRoot=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:36:47,765 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1165987477_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035807721, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035807721/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-12T20:36:47,770 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-12T20:36:47,800 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035807721/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-12T20:36:47,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741857_1033 (size=162) 2024-12-12T20:36:47,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741857_1033 (size=162) 2024-12-12T20:36:47,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741857_1033 (size=162) 2024-12-12T20:36:47,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741858_1034 (size=627) 2024-12-12T20:36:47,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741858_1034 (size=627) 2024-12-12T20:36:47,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741858_1034 (size=627) 2024-12-12T20:36:48,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741859_1035 (size=154) 2024-12-12T20:36:48,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741859_1035 (size=154) 
2024-12-12T20:36:48,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741859_1035 (size=154) 2024-12-12T20:36:48,301 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-12T20:36:48,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-12T20:36:48,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-12T20:36:48,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-12T20:36:49,295 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T20:36:49,619 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop-3414247359563295190.jar 2024-12-12T20:36:49,620 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-12T20:36:49,620 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-12T20:36:49,706 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop-3481963244932197916.jar 2024-12-12T20:36:49,707 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-12T20:36:49,708 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-12T20:36:49,708 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-12T20:36:49,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-12T20:36:49,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-12T20:36:49,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-12T20:36:49,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-12T20:36:49,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-12T20:36:49,712 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-12T20:36:49,712 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-12T20:36:49,713 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-12T20:36:49,713 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-12T20:36:49,714 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-12T20:36:49,714 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-12T20:36:49,715 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-12T20:36:49,715 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-12T20:36:49,716 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-12T20:36:49,716 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-12T20:36:49,719 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:36:49,720 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:36:49,721 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:36:49,721 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:36:49,721 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:36:49,722 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:36:49,722 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:36:49,851 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T20:36:50,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:36349 is added to blk_1073741860_1036 (size=127628) 2024-12-12T20:36:50,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741860_1036 (size=127628) 2024-12-12T20:36:50,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741860_1036 (size=127628) 2024-12-12T20:36:50,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741861_1037 (size=2172101) 2024-12-12T20:36:50,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741861_1037 (size=2172101) 2024-12-12T20:36:50,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741861_1037 (size=2172101) 2024-12-12T20:36:50,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741862_1038 (size=213228) 2024-12-12T20:36:50,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741862_1038 (size=213228) 2024-12-12T20:36:50,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741862_1038 (size=213228) 2024-12-12T20:36:50,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741863_1039 (size=1877034) 2024-12-12T20:36:50,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741863_1039 (size=1877034) 2024-12-12T20:36:50,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741863_1039 (size=1877034) 2024-12-12T20:36:50,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741864_1040 (size=533455) 2024-12-12T20:36:50,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741864_1040 (size=533455) 2024-12-12T20:36:50,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741864_1040 (size=533455) 2024-12-12T20:36:50,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741865_1041 (size=7280644) 2024-12-12T20:36:50,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741865_1041 (size=7280644) 2024-12-12T20:36:50,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741865_1041 (size=7280644) 2024-12-12T20:36:50,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741866_1042 (size=4188619) 2024-12-12T20:36:50,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741866_1042 (size=4188619) 2024-12-12T20:36:50,559 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741866_1042 (size=4188619) 2024-12-12T20:36:50,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741867_1043 (size=20406) 2024-12-12T20:36:50,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741867_1043 (size=20406) 2024-12-12T20:36:50,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741867_1043 (size=20406) 2024-12-12T20:36:51,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741868_1044 (size=75495) 2024-12-12T20:36:51,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741868_1044 (size=75495) 2024-12-12T20:36:51,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741868_1044 (size=75495) 2024-12-12T20:36:51,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-12T20:36:51,126 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-12-12T20:36:51,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741869_1045 (size=45609) 2024-12-12T20:36:51,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741869_1045 (size=45609) 2024-12-12T20:36:51,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741869_1045 (size=45609) 2024-12-12T20:36:51,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741870_1046 (size=110084) 2024-12-12T20:36:51,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741870_1046 (size=110084) 2024-12-12T20:36:51,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741870_1046 (size=110084) 2024-12-12T20:36:51,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741871_1047 (size=1323991) 2024-12-12T20:36:51,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741871_1047 (size=1323991) 2024-12-12T20:36:51,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741871_1047 (size=1323991) 2024-12-12T20:36:51,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741872_1048 (size=23076) 2024-12-12T20:36:51,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:32781 is added to blk_1073741872_1048 (size=23076) 2024-12-12T20:36:51,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741872_1048 (size=23076) 2024-12-12T20:36:51,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741873_1049 (size=451756) 2024-12-12T20:36:51,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741873_1049 (size=451756) 2024-12-12T20:36:51,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741873_1049 (size=451756) 2024-12-12T20:36:51,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741874_1050 (size=126803) 2024-12-12T20:36:51,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741874_1050 (size=126803) 2024-12-12T20:36:51,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741874_1050 (size=126803) 2024-12-12T20:36:51,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741875_1051 (size=322274) 2024-12-12T20:36:51,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741875_1051 (size=322274) 2024-12-12T20:36:51,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741875_1051 (size=322274) 2024-12-12T20:36:51,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741876_1052 (size=1832290) 2024-12-12T20:36:51,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741876_1052 (size=1832290) 2024-12-12T20:36:51,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741876_1052 (size=1832290) 2024-12-12T20:36:51,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741877_1053 (size=30081) 2024-12-12T20:36:51,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741877_1053 (size=30081) 2024-12-12T20:36:51,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741877_1053 (size=30081) 2024-12-12T20:36:51,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741878_1054 (size=53616) 2024-12-12T20:36:51,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741878_1054 (size=53616) 2024-12-12T20:36:51,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741878_1054 (size=53616) 2024-12-12T20:36:51,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:32781 is added to blk_1073741879_1055 (size=29229) 2024-12-12T20:36:51,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741879_1055 (size=29229) 2024-12-12T20:36:51,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741879_1055 (size=29229) 2024-12-12T20:36:51,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741880_1056 (size=169089) 2024-12-12T20:36:51,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741880_1056 (size=169089) 2024-12-12T20:36:51,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741880_1056 (size=169089) 2024-12-12T20:36:51,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741881_1057 (size=6350857) 2024-12-12T20:36:51,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741881_1057 (size=6350857) 2024-12-12T20:36:51,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741881_1057 (size=6350857) 2024-12-12T20:36:51,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741882_1058 (size=5175431) 2024-12-12T20:36:51,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741882_1058 (size=5175431) 2024-12-12T20:36:51,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741882_1058 (size=5175431) 2024-12-12T20:36:52,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741883_1059 (size=136454) 2024-12-12T20:36:52,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741883_1059 (size=136454) 2024-12-12T20:36:52,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741883_1059 (size=136454) 2024-12-12T20:36:52,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741884_1060 (size=907861) 2024-12-12T20:36:52,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741884_1060 (size=907861) 2024-12-12T20:36:52,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741884_1060 (size=907861) 2024-12-12T20:36:52,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741885_1061 (size=3317408) 2024-12-12T20:36:52,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741885_1061 (size=3317408) 2024-12-12T20:36:52,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:36349 is added to blk_1073741885_1061 (size=3317408) 2024-12-12T20:36:52,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741886_1062 (size=503880) 2024-12-12T20:36:52,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741886_1062 (size=503880) 2024-12-12T20:36:52,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741886_1062 (size=503880) 2024-12-12T20:36:52,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741887_1063 (size=4695811) 2024-12-12T20:36:52,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741887_1063 (size=4695811) 2024-12-12T20:36:52,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741887_1063 (size=4695811) 2024-12-12T20:36:52,566 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-12T20:36:52,576 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-12-12T20:36:52,585 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-12T20:36:52,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741888_1064 (size=342) 2024-12-12T20:36:52,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741888_1064 (size=342) 2024-12-12T20:36:52,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741888_1064 (size=342) 2024-12-12T20:36:52,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741889_1065 (size=15) 2024-12-12T20:36:52,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741889_1065 (size=15) 2024-12-12T20:36:52,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741889_1065 (size=15) 2024-12-12T20:36:53,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741890_1066 (size=304886) 2024-12-12T20:36:53,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741890_1066 (size=304886) 2024-12-12T20:36:53,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741890_1066 (size=304886) 2024-12-12T20:36:53,666 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-12T20:36:53,666 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T20:36:53,945 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0001_000001 (auth:SIMPLE) from 127.0.0.1:41030 2024-12-12T20:36:56,094 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-12T20:36:56,100 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52006, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-12T20:37:00,078 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-12T20:37:00,080 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56504, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-12T20:37:00,086 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-12T20:37:00,088 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56510, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-12T20:37:05,799 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0001_000001 (auth:SIMPLE) from 127.0.0.1:47368 2024-12-12T20:37:06,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741891_1067 (size=350560) 2024-12-12T20:37:06,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741891_1067 (size=350560) 2024-12-12T20:37:06,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741891_1067 (size=350560) 2024-12-12T20:37:08,452 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0001_000001 (auth:SIMPLE) from 127.0.0.1:54278 2024-12-12T20:37:11,364 INFO [master/0a737713f6e3:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-12T20:37:11,364 INFO [master/0a737713f6e3:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-12T20:37:15,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741892_1068 (size=8258) 2024-12-12T20:37:15,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741892_1068 (size=8258) 2024-12-12T20:37:15,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741892_1068 (size=8258) 2024-12-12T20:37:16,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741893_1069 (size=5354) 2024-12-12T20:37:16,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741893_1069 (size=5354) 2024-12-12T20:37:16,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741893_1069 (size=5354) 2024-12-12T20:37:16,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741894_1070 (size=17423) 2024-12-12T20:37:16,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741894_1070 (size=17423) 2024-12-12T20:37:16,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741894_1070 (size=17423) 2024-12-12T20:37:16,405 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_1/usercache/jenkins/appcache/application_1734035798001_0001/container_1734035798001_0001_01_000002/launch_container.sh] 2024-12-12T20:37:16,406 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_1/usercache/jenkins/appcache/application_1734035798001_0001/container_1734035798001_0001_01_000002/container_tokens] 2024-12-12T20:37:16,406 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_1/usercache/jenkins/appcache/application_1734035798001_0001/container_1734035798001_0001_01_000002/sysfs] 2024-12-12T20:37:16,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741895_1071 (size=464) 2024-12-12T20:37:16,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741895_1071 (size=464) 2024-12-12T20:37:16,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:41061 is added to blk_1073741895_1071 (size=464) 2024-12-12T20:37:16,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741896_1072 (size=17423) 2024-12-12T20:37:16,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741896_1072 (size=17423) 2024-12-12T20:37:16,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741896_1072 (size=17423) 2024-12-12T20:37:17,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741897_1073 (size=350560) 2024-12-12T20:37:17,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741897_1073 (size=350560) 2024-12-12T20:37:17,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741897_1073 (size=350560) 2024-12-12T20:37:19,158 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-12T20:37:19,160 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-12T20:37:19,177 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: testExportWithTargetName 2024-12-12T20:37:19,177 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-12T20:37:19,178 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-12T20:37:19,178 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1165987477_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-12T20:37:19,183 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-12-12T20:37:19,183 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-12-12T20:37:19,183 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1165987477_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035807721/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035807721/.hbase-snapshot/testExportWithTargetName 2024-12-12T20:37:19,184 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035807721/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-12-12T20:37:19,184 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): 
hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035807721/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-12-12T20:37:19,213 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithTargetName 2024-12-12T20:37:19,217 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-12-12T20:37:19,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=23, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithTargetName 2024-12-12T20:37:19,236 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035839235"}]},"ts":"1734035839235"} 2024-12-12T20:37:19,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-12T20:37:19,238 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-12-12T20:37:19,249 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-12-12T20:37:19,253 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=24, ppid=23, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-12-12T20:37:19,268 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=61ad8a9b414ee40aeebf5ddfbd8281f4, UNASSIGN}, {pid=26, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=3966f962b82fb93920b9fa4a5587af0e, UNASSIGN}] 2024-12-12T20:37:19,275 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=26, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=3966f962b82fb93920b9fa4a5587af0e, UNASSIGN 2024-12-12T20:37:19,275 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=25, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=61ad8a9b414ee40aeebf5ddfbd8281f4, UNASSIGN 2024-12-12T20:37:19,277 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=25 updating hbase:meta row=61ad8a9b414ee40aeebf5ddfbd8281f4, regionState=CLOSING, regionLocation=0a737713f6e3,36817,1734035782356 2024-12-12T20:37:19,277 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=26 updating hbase:meta row=3966f962b82fb93920b9fa4a5587af0e, regionState=CLOSING, regionLocation=0a737713f6e3,42407,1734035781742 2024-12-12T20:37:19,288 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T20:37:19,288 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; CloseRegionProcedure 3966f962b82fb93920b9fa4a5587af0e, server=0a737713f6e3,42407,1734035781742}] 
2024-12-12T20:37:19,294 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T20:37:19,295 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=28, ppid=25, state=RUNNABLE; CloseRegionProcedure 61ad8a9b414ee40aeebf5ddfbd8281f4, server=0a737713f6e3,36817,1734035782356}] 2024-12-12T20:37:19,296 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T20:37:19,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-12T20:37:19,457 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:37:19,457 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:37:19,459 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(124): Close 3966f962b82fb93920b9fa4a5587af0e 2024-12-12T20:37:19,460 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T20:37:19,461 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1681): Closing 3966f962b82fb93920b9fa4a5587af0e, disabling compactions & flushes 2024-12-12T20:37:19,461 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e. 2024-12-12T20:37:19,461 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e. 2024-12-12T20:37:19,461 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e. after waiting 0 ms 2024-12-12T20:37:19,461 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e. 
2024-12-12T20:37:19,467 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(124): Close 61ad8a9b414ee40aeebf5ddfbd8281f4 2024-12-12T20:37:19,467 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T20:37:19,467 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1681): Closing 61ad8a9b414ee40aeebf5ddfbd8281f4, disabling compactions & flushes 2024-12-12T20:37:19,467 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4. 2024-12-12T20:37:19,467 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4. 2024-12-12T20:37:19,468 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4. after waiting 0 ms 2024-12-12T20:37:19,468 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4. 2024-12-12T20:37:19,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-12T20:37:19,599 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/61ad8a9b414ee40aeebf5ddfbd8281f4/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T20:37:19,607 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/3966f962b82fb93920b9fa4a5587af0e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T20:37:19,627 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:37:19,627 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4. 
2024-12-12T20:37:19,628 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1635): Region close journal for 61ad8a9b414ee40aeebf5ddfbd8281f4: 2024-12-12T20:37:19,627 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:37:19,628 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e. 2024-12-12T20:37:19,628 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1635): Region close journal for 3966f962b82fb93920b9fa4a5587af0e: 2024-12-12T20:37:19,634 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(170): Closed 3966f962b82fb93920b9fa4a5587af0e 2024-12-12T20:37:19,635 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=26 updating hbase:meta row=3966f962b82fb93920b9fa4a5587af0e, regionState=CLOSED 2024-12-12T20:37:19,638 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(170): Closed 61ad8a9b414ee40aeebf5ddfbd8281f4 2024-12-12T20:37:19,640 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=25 updating hbase:meta row=61ad8a9b414ee40aeebf5ddfbd8281f4, regionState=CLOSED 2024-12-12T20:37:19,650 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-12-12T20:37:19,653 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=28, resume processing ppid=25 2024-12-12T20:37:19,653 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, ppid=25, state=SUCCESS; CloseRegionProcedure 61ad8a9b414ee40aeebf5ddfbd8281f4, server=0a737713f6e3,36817,1734035782356 in 349 msec 2024-12-12T20:37:19,654 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, ppid=24, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=3966f962b82fb93920b9fa4a5587af0e, UNASSIGN in 382 msec 2024-12-12T20:37:19,654 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; CloseRegionProcedure 3966f962b82fb93920b9fa4a5587af0e, server=0a737713f6e3,42407,1734035781742 in 352 msec 2024-12-12T20:37:19,662 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-12-12T20:37:19,662 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=61ad8a9b414ee40aeebf5ddfbd8281f4, UNASSIGN in 385 msec 2024-12-12T20:37:19,670 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=24, resume processing ppid=23 2024-12-12T20:37:19,670 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, ppid=23, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 411 msec 2024-12-12T20:37:19,674 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035839673"}]},"ts":"1734035839673"} 
2024-12-12T20:37:19,676 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-12-12T20:37:19,723 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-12-12T20:37:19,738 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithTargetName in 506 msec 2024-12-12T20:37:19,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-12T20:37:19,858 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName, procId: 23 completed 2024-12-12T20:37:19,867 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-12-12T20:37:19,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=29, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-12T20:37:19,888 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=29, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-12T20:37:19,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithTargetName 2024-12-12T20:37:19,892 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=29, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-12T20:37:19,901 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46859 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-12-12T20:37:19,902 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/3966f962b82fb93920b9fa4a5587af0e 2024-12-12T20:37:19,904 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/61ad8a9b414ee40aeebf5ddfbd8281f4 2024-12-12T20:37:19,914 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/3966f962b82fb93920b9fa4a5587af0e/cf, FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/3966f962b82fb93920b9fa4a5587af0e/recovered.edits] 2024-12-12T20:37:19,918 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/61ad8a9b414ee40aeebf5ddfbd8281f4/cf, FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/61ad8a9b414ee40aeebf5ddfbd8281f4/recovered.edits] 
2024-12-12T20:37:19,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-12T20:37:19,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-12T20:37:19,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-12T20:37:19,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-12T20:37:19,926 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-12T20:37:19,926 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-12T20:37:19,926 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-12T20:37:19,943 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-12T20:37:19,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:37:19,945 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data null 2024-12-12T20:37:19,945 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-12T20:37:19,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-12T20:37:19,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:37:19,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-12T20:37:19,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:37:19,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:37:19,958 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/3966f962b82fb93920b9fa4a5587af0e/cf/fbd9caf873da487e9b6b5aafd38a106b to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportWithTargetName/3966f962b82fb93920b9fa4a5587af0e/cf/fbd9caf873da487e9b6b5aafd38a106b 2024-12-12T20:37:19,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29 2024-12-12T20:37:19,972 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/61ad8a9b414ee40aeebf5ddfbd8281f4/cf/9e937bfdd1744227abaa48f44cc73f63 to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportWithTargetName/61ad8a9b414ee40aeebf5ddfbd8281f4/cf/9e937bfdd1744227abaa48f44cc73f63 2024-12-12T20:37:19,998 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/61ad8a9b414ee40aeebf5ddfbd8281f4/recovered.edits/9.seqid to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportWithTargetName/61ad8a9b414ee40aeebf5ddfbd8281f4/recovered.edits/9.seqid 2024-12-12T20:37:19,999 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/61ad8a9b414ee40aeebf5ddfbd8281f4 2024-12-12T20:37:20,001 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/3966f962b82fb93920b9fa4a5587af0e/recovered.edits/9.seqid to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportWithTargetName/3966f962b82fb93920b9fa4a5587af0e/recovered.edits/9.seqid 2024-12-12T20:37:20,004 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithTargetName/3966f962b82fb93920b9fa4a5587af0e 2024-12-12T20:37:20,004 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-12-12T20:37:20,007 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=29, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-12T20:37:20,015 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46859 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 
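
The HFileArchiver entries above show that dropping the table does not delete its HFiles outright: each region's contents (the cf/ store file and the recovered.edits sequence-id file) are moved from data/default/testtb-testExportWithTargetName/<region> into the parallel archive/data/default/... tree, and only then is the emptied region directory deleted. A hedged sketch for inspecting that archive with the Hadoop FileSystem API; the HDFS URI and path are copied from the log, the listing code itself is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocatedFileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    public class ListArchivedRegions {
      public static void main(String[] args) throws Exception {
        // Archive root for the deleted table, as reported by HFileArchiver above.
        Path archived = new Path("hdfs://localhost:37615/user/jenkins/test-data/"
            + "943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/"
            + "testtb-testExportWithTargetName");
        try (FileSystem fs = archived.getFileSystem(new Configuration())) {
          // Recursively list everything that was archived (store files, recovered.edits).
          RemoteIterator<LocatedFileStatus> it = fs.listFiles(archived, true);
          while (it.hasNext()) {
            LocatedFileStatus f = it.next();
            System.out.println(f.getPath() + " (" + f.getLen() + " bytes)");
          }
        }
      }
    }
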
2024-12-12T20:37:20,020 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-12-12T20:37:20,033 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithTargetName' descriptor. 2024-12-12T20:37:20,036 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=29, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-12T20:37:20,036 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithTargetName' from region states. 2024-12-12T20:37:20,036 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734035840036"}]},"ts":"9223372036854775807"} 2024-12-12T20:37:20,036 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734035840036"}]},"ts":"9223372036854775807"} 2024-12-12T20:37:20,045 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-12T20:37:20,045 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 61ad8a9b414ee40aeebf5ddfbd8281f4, NAME => 'testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 3966f962b82fb93920b9fa4a5587af0e, NAME => 'testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e.', STARTKEY => '1', ENDKEY => ''}] 2024-12-12T20:37:20,046 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithTargetName' as deleted. 
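
The Delete mutations above target the two region rows of the table in hbase:meta, keyed by the full region names (<table>,<start key>,<creation timestamp>.<encoded name>.). A hedged sketch that reads those exact rows back from hbase:meta to confirm they are gone; the row keys are copied from the log, the verification code is illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CheckMetaRowsGone {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          // Region row keys exactly as they appear in the Delete entries above.
          String[] rows = {
            "testtb-testExportWithTargetName,,1734035802793.61ad8a9b414ee40aeebf5ddfbd8281f4.",
            "testtb-testExportWithTargetName,1,1734035802793.3966f962b82fb93920b9fa4a5587af0e."
          };
          for (String row : rows) {
            Result r = meta.get(new Get(Bytes.toBytes(row)));
            System.out.println(row + " present in hbase:meta? " + !r.isEmpty());
          }
        }
      }
    }
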
2024-12-12T20:37:20,046 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734035840046"}]},"ts":"9223372036854775807"} 2024-12-12T20:37:20,050 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithTargetName state from META 2024-12-12T20:37:20,058 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=29, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-12T20:37:20,061 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithTargetName in 189 msec 2024-12-12T20:37:20,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29 2024-12-12T20:37:20,068 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName, procId: 29 completed 2024-12-12T20:37:20,114 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" 2024-12-12T20:37:20,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-12-12T20:37:20,132 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" 2024-12-12T20:37:20,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithTargetName 2024-12-12T20:37:20,181 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=760 (was 716) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1165987477_22 at /127.0.0.1:55822 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1165987477_22 at /127.0.0.1:59214 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) 
app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1781300041_1 at /127.0.0.1:57356 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'JobHistoryServer' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874193583) connection to localhost/127.0.0.1:40049 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: htable-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1305 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874193583) connection to localhost/127.0.0.1:37615 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Client (874193583) connection to localhost/127.0.0.1:37615 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1165987477_22 at /127.0.0.1:57386 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40049 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 55141) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data1 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: htable-pool-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=804 (was 771) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1482 (was 1447) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=3351 (was 3868) 2024-12-12T20:37:20,181 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=760 is superior to 500 2024-12-12T20:37:20,207 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=760, OpenFileDescriptor=804, MaxFileDescriptor=1048576, SystemLoadAverage=1482, ProcessCount=17, AvailableMemoryMB=3347 2024-12-12T20:37:20,207 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=760 is superior to 500 2024-12-12T20:37:20,211 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T20:37:20,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-12-12T20:37:20,228 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T20:37:20,228 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:37:20,228 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure 
request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 30 2024-12-12T20:37:20,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-12T20:37:20,235 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T20:37:20,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741898_1074 (size=404) 2024-12-12T20:37:20,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741898_1074 (size=404) 2024-12-12T20:37:20,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741898_1074 (size=404) 2024-12-12T20:37:20,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-12T20:37:20,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-12T20:37:20,696 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 1cf4c430205ee121a94b18015bb08a6a, NAME => 'testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:37:20,698 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => d08da60ed6cea1345519d42919fe2917, NAME => 'testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:37:20,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741900_1076 (size=65) 2024-12-12T20:37:20,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741900_1076 (size=65) 2024-12-12T20:37:20,774 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] 
regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:37:20,774 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1681): Closing d08da60ed6cea1345519d42919fe2917, disabling compactions & flushes 2024-12-12T20:37:20,774 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917. 2024-12-12T20:37:20,775 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917. 2024-12-12T20:37:20,775 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917. after waiting 0 ms 2024-12-12T20:37:20,775 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917. 2024-12-12T20:37:20,775 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917. 2024-12-12T20:37:20,775 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1635): Region close journal for d08da60ed6cea1345519d42919fe2917: 2024-12-12T20:37:20,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741900_1076 (size=65) 2024-12-12T20:37:20,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741899_1075 (size=65) 2024-12-12T20:37:20,784 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:37:20,784 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1681): Closing 1cf4c430205ee121a94b18015bb08a6a, disabling compactions & flushes 2024-12-12T20:37:20,784 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a. 2024-12-12T20:37:20,785 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a. 2024-12-12T20:37:20,785 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a. 
after waiting 0 ms 2024-12-12T20:37:20,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741899_1075 (size=65) 2024-12-12T20:37:20,785 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a. 2024-12-12T20:37:20,785 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a. 2024-12-12T20:37:20,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741899_1075 (size=65) 2024-12-12T20:37:20,785 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1635): Region close journal for 1cf4c430205ee121a94b18015bb08a6a: 2024-12-12T20:37:20,787 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T20:37:20,788 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734035840787"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734035840787"}]},"ts":"1734035840787"} 2024-12-12T20:37:20,789 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734035840787"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734035840787"}]},"ts":"1734035840787"} 2024-12-12T20:37:20,799 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 
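Note: the create request logged above ("Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', ... {NAME => 'cf', VERSIONS => '1', ...}") is what a caller submits through the HBase Admin API; the master then drives the CreateTableProcedure through the PRE_OPERATION, WRITE_FS_LAYOUT and ADD_TO_META states seen in the surrounding entries. A minimal client-side sketch, assuming the HBase 2.x Java API (this is not the test's own code; the split key "1" is chosen only to reproduce the two regions with start keys '' and '1' created below):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateExportTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportWithResetTtl"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)   // VERSIONS => '1' in the descriptor logged above
                  .build())
              .build();
          // One split key ("1") yields the two regions seen below: ['', '1') and ['1', '').
          admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }

admin.createTable() blocks until the master reports the procedure as done, which is what the repeated "Checking to see if procedure is done pid=30" entries in this log correspond to.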
2024-12-12T20:37:20,801 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T20:37:20,802 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035840801"}]},"ts":"1734035840801"} 2024-12-12T20:37:20,804 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-12T20:37:20,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-12T20:37:20,859 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {0a737713f6e3=0} racks are {/default-rack=0} 2024-12-12T20:37:20,872 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-12T20:37:20,872 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-12T20:37:20,872 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-12T20:37:20,872 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-12T20:37:20,872 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-12T20:37:20,872 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-12T20:37:20,873 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T20:37:20,873 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=1cf4c430205ee121a94b18015bb08a6a, ASSIGN}, {pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=d08da60ed6cea1345519d42919fe2917, ASSIGN}] 2024-12-12T20:37:20,880 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=d08da60ed6cea1345519d42919fe2917, ASSIGN 2024-12-12T20:37:20,880 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=1cf4c430205ee121a94b18015bb08a6a, ASSIGN 2024-12-12T20:37:20,881 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=d08da60ed6cea1345519d42919fe2917, ASSIGN; state=OFFLINE, location=0a737713f6e3,46859,1734035782031; forceNewPlan=false, retain=false 2024-12-12T20:37:20,881 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=1cf4c430205ee121a94b18015bb08a6a, ASSIGN; state=OFFLINE, location=0a737713f6e3,36817,1734035782356; 
forceNewPlan=false, retain=false 2024-12-12T20:37:21,032 INFO [0a737713f6e3:43067 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-12T20:37:21,033 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=d08da60ed6cea1345519d42919fe2917, regionState=OPENING, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:37:21,035 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=31 updating hbase:meta row=1cf4c430205ee121a94b18015bb08a6a, regionState=OPENING, regionLocation=0a737713f6e3,36817,1734035782356 2024-12-12T20:37:21,040 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; OpenRegionProcedure d08da60ed6cea1345519d42919fe2917, server=0a737713f6e3,46859,1734035782031}] 2024-12-12T20:37:21,044 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=34, ppid=31, state=RUNNABLE; OpenRegionProcedure 1cf4c430205ee121a94b18015bb08a6a, server=0a737713f6e3,36817,1734035782356}] 2024-12-12T20:37:21,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-12T20:37:21,195 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:37:21,197 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:37:21,210 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] handler.AssignRegionHandler(135): Open testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a. 2024-12-12T20:37:21,211 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7285): Opening region: {ENCODED => 1cf4c430205ee121a94b18015bb08a6a, NAME => 'testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a.', STARTKEY => '', ENDKEY => '1'} 2024-12-12T20:37:21,211 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a. service=AccessControlService 2024-12-12T20:37:21,211 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
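Note: at this point both TransitRegionStateProcedures have dispatched OpenRegionProcedures to 0a737713f6e3,46859 and 0a737713f6e3,36817. The test waits for these transitions to finish before touching the table; the HBaseTestingUtility(3531/3585/3605) entries further down ("Waiting until all regions ... get assigned", "All regions ... assigned") come from a call that in test code looks roughly like the following sketch, where util stands for the test's HBaseTestingUtility instance:

    import org.apache.hadoop.hbase.TableName;

    // Blocks (60s timeout, matching the "Timeout = 60000ms" entry below) until
    // hbase:meta and the assignment manager agree every region of the table is open.
    util.waitUntilAllRegionsAssigned(TableName.valueOf("testtb-testExportWithResetTtl"));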
2024-12-12T20:37:21,212 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 1cf4c430205ee121a94b18015bb08a6a 2024-12-12T20:37:21,212 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:37:21,212 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7327): checking encryption for 1cf4c430205ee121a94b18015bb08a6a 2024-12-12T20:37:21,212 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7330): checking classloading for 1cf4c430205ee121a94b18015bb08a6a 2024-12-12T20:37:21,216 INFO [StoreOpener-1cf4c430205ee121a94b18015bb08a6a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1cf4c430205ee121a94b18015bb08a6a 2024-12-12T20:37:21,221 INFO [StoreOpener-1cf4c430205ee121a94b18015bb08a6a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1cf4c430205ee121a94b18015bb08a6a columnFamilyName cf 2024-12-12T20:37:21,221 DEBUG [StoreOpener-1cf4c430205ee121a94b18015bb08a6a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:37:21,222 INFO [StoreOpener-1cf4c430205ee121a94b18015bb08a6a-1 {}] regionserver.HStore(327): Store=1cf4c430205ee121a94b18015bb08a6a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:37:21,222 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(135): Open testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917. 2024-12-12T20:37:21,222 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7285): Opening region: {ENCODED => d08da60ed6cea1345519d42919fe2917, NAME => 'testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917.', STARTKEY => '1', ENDKEY => ''} 2024-12-12T20:37:21,223 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917. 
service=AccessControlService 2024-12-12T20:37:21,223 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/1cf4c430205ee121a94b18015bb08a6a 2024-12-12T20:37:21,223 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T20:37:21,223 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl d08da60ed6cea1345519d42919fe2917 2024-12-12T20:37:21,223 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:37:21,224 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/1cf4c430205ee121a94b18015bb08a6a 2024-12-12T20:37:21,224 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7327): checking encryption for d08da60ed6cea1345519d42919fe2917 2024-12-12T20:37:21,224 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7330): checking classloading for d08da60ed6cea1345519d42919fe2917 2024-12-12T20:37:21,226 INFO [StoreOpener-d08da60ed6cea1345519d42919fe2917-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d08da60ed6cea1345519d42919fe2917 2024-12-12T20:37:21,229 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1085): writing seq id for 1cf4c430205ee121a94b18015bb08a6a 2024-12-12T20:37:21,236 INFO [StoreOpener-d08da60ed6cea1345519d42919fe2917-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d08da60ed6cea1345519d42919fe2917 columnFamilyName cf 2024-12-12T20:37:21,236 DEBUG [StoreOpener-d08da60ed6cea1345519d42919fe2917-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:37:21,241 INFO [StoreOpener-d08da60ed6cea1345519d42919fe2917-1 {}] regionserver.HStore(327): Store=d08da60ed6cea1345519d42919fe2917/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:37:21,249 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/d08da60ed6cea1345519d42919fe2917 2024-12-12T20:37:21,252 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/1cf4c430205ee121a94b18015bb08a6a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:37:21,254 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1102): Opened 1cf4c430205ee121a94b18015bb08a6a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72546717, jitterRate=0.08103032410144806}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:37:21,255 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/d08da60ed6cea1345519d42919fe2917 2024-12-12T20:37:21,256 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1001): Region open journal for 1cf4c430205ee121a94b18015bb08a6a: 2024-12-12T20:37:21,258 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a., pid=34, masterSystemTime=1734035841197 2024-12-12T20:37:21,264 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a. 2024-12-12T20:37:21,264 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] handler.AssignRegionHandler(164): Opened testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a. 
2024-12-12T20:37:21,268 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=31 updating hbase:meta row=1cf4c430205ee121a94b18015bb08a6a, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,36817,1734035782356 2024-12-12T20:37:21,268 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1085): writing seq id for d08da60ed6cea1345519d42919fe2917 2024-12-12T20:37:21,286 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=34, resume processing ppid=31 2024-12-12T20:37:21,288 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/d08da60ed6cea1345519d42919fe2917/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:37:21,289 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, ppid=31, state=SUCCESS; OpenRegionProcedure 1cf4c430205ee121a94b18015bb08a6a, server=0a737713f6e3,36817,1734035782356 in 230 msec 2024-12-12T20:37:21,291 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1102): Opened d08da60ed6cea1345519d42919fe2917; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75416283, jitterRate=0.1237901896238327}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:37:21,292 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1001): Region open journal for d08da60ed6cea1345519d42919fe2917: 2024-12-12T20:37:21,293 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=1cf4c430205ee121a94b18015bb08a6a, ASSIGN in 413 msec 2024-12-12T20:37:21,294 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917., pid=33, masterSystemTime=1734035841195 2024-12-12T20:37:21,300 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917. 2024-12-12T20:37:21,300 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(164): Opened testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917. 
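Note: both regions are now open (1cf4c430... on 0a737713f6e3,36817 and d08da60e... on 0a737713f6e3,46859, per the RegionStateStore updates around here). Once that is the case, the same placement can be read back from the client side; a small sketch, assuming an open Connection conn as in the createTable sketch above:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    static void printAssignments(Connection conn) throws IOException {
      try (RegionLocator locator =
               conn.getRegionLocator(TableName.valueOf("testtb-testExportWithResetTtl"))) {
        for (HRegionLocation loc : locator.getAllRegionLocations()) {
          // e.g. 1cf4c430205ee121a94b18015bb08a6a -> 0a737713f6e3,36817,1734035782356
          System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
        }
      }
    }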
2024-12-12T20:37:21,304 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=d08da60ed6cea1345519d42919fe2917, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:37:21,318 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-12-12T20:37:21,319 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; OpenRegionProcedure d08da60ed6cea1345519d42919fe2917, server=0a737713f6e3,46859,1734035782031 in 269 msec 2024-12-12T20:37:21,321 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=32, resume processing ppid=30 2024-12-12T20:37:21,322 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, ppid=30, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=d08da60ed6cea1345519d42919fe2917, ASSIGN in 445 msec 2024-12-12T20:37:21,323 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T20:37:21,323 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035841323"}]},"ts":"1734035841323"} 2024-12-12T20:37:21,325 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-12T20:37:21,335 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T20:37:21,336 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-12-12T20:37:21,340 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46859 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-12T20:37:21,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:37:21,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:37:21,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:37:21,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:37:21,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-12T20:37:21,359 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T20:37:21,363 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T20:37:21,363 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T20:37:21,366 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T20:37:21,367 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithResetTtl in 1.1460 sec 2024-12-12T20:37:22,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-12T20:37:22,360 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl, procId: 30 completed 2024-12-12T20:37:22,360 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithResetTtl get assigned. Timeout = 60000ms 2024-12-12T20:37:22,360 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:37:22,368 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-12T20:37:22,368 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:37:22,368 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithResetTtl assigned. 2024-12-12T20:37:22,392 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-12T20:37:22,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734035842392 (current time:1734035842392). 
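Note: the "snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }" entry above is the server-side trace of a client Admin.snapshot() call; SnapshotDescriptionUtils then fills in the creation time, version and owner as logged next. A minimal sketch of that call (HBase 2.x API, assuming an open Connection conn as before):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotType;

    try (Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot of the still-empty table; the call blocks until the
      // SnapshotProcedure (pid=35 in this log) completes.
      admin.snapshot("emptySnaptb0-testExportWithResetTtl",
          TableName.valueOf("testtb-testExportWithResetTtl"), SnapshotType.FLUSH);
    }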
2024-12-12T20:37:22,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T20:37:22,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-12T20:37:22,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-12T20:37:22,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x39567832 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4955e4af 2024-12-12T20:37:22,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b28b670, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:37:22,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:37:22,464 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40816, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:37:22,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x39567832 to 127.0.0.1:52655 2024-12-12T20:37:22,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:37:22,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x65b880c8 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3ae704c 2024-12-12T20:37:22,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d1bcd7f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:37:22,542 DEBUG [hconnection-0x6de9b878-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:37:22,544 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40828, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:37:22,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x65b880c8 to 127.0.0.1:52655 2024-12-12T20:37:22,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:37:22,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 
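Note: the PermissionStorage and ZKPermissionWatcher entries earlier in this run show the table ACL (jenkins: RWXCA) being written to hbase:acl and fanned out to each region server's cache via the /hbase/acl znode; the "Read acl" entry just above is the master re-reading it while preparing the snapshot. With the AccessController coprocessor enabled, that grant can also be listed from a client. A hedged sketch only; the AccessControlClient helper and its Throwable-throwing signature are assumptions about the 2.x security API, not something shown in this log:

    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    try {
      // Should list the RWXCA grant for user 'jenkins' recorded by PermissionStorage above.
      for (UserPermission p :
          AccessControlClient.getUserPermissions(conn, "testtb-testExportWithResetTtl")) {
        System.out.println(p);
      }
    } catch (Throwable t) {   // getUserPermissions declares Throwable in this API
      throw new RuntimeException(t);
    }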
2024-12-12T20:37:22,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-12T20:37:22,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=35, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-12T20:37:22,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 35 2024-12-12T20:37:22,555 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T20:37:22,556 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T20:37:22,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-12T20:37:22,573 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T20:37:22,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741901_1077 (size=161) 2024-12-12T20:37:22,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741901_1077 (size=161) 2024-12-12T20:37:22,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741901_1077 (size=161) 2024-12-12T20:37:22,606 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T20:37:22,606 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 1cf4c430205ee121a94b18015bb08a6a}, {pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure d08da60ed6cea1345519d42919fe2917}] 2024-12-12T20:37:22,609 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 1cf4c430205ee121a94b18015bb08a6a 2024-12-12T20:37:22,609 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 
d08da60ed6cea1345519d42919fe2917 2024-12-12T20:37:22,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-12T20:37:22,767 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:37:22,768 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:37:22,772 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36817 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=36 2024-12-12T20:37:22,772 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46859 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=37 2024-12-12T20:37:22,774 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a. 2024-12-12T20:37:22,774 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.HRegion(2538): Flush status journal for 1cf4c430205ee121a94b18015bb08a6a: 2024-12-12T20:37:22,774 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-12T20:37:22,775 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-12T20:37:22,775 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:37:22,775 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T20:37:22,780 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917. 2024-12-12T20:37:22,780 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.HRegion(2538): Flush status journal for d08da60ed6cea1345519d42919fe2917: 2024-12-12T20:37:22,780 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-12T20:37:22,781 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917.' 
region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-12T20:37:22,781 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:37:22,781 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T20:37:22,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741902_1078 (size=68) 2024-12-12T20:37:22,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741902_1078 (size=68) 2024-12-12T20:37:22,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741902_1078 (size=68) 2024-12-12T20:37:22,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741903_1079 (size=68) 2024-12-12T20:37:22,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741903_1079 (size=68) 2024-12-12T20:37:22,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741903_1079 (size=68) 2024-12-12T20:37:22,856 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a. 2024-12-12T20:37:22,856 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=36 2024-12-12T20:37:22,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=36 2024-12-12T20:37:22,860 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 1cf4c430205ee121a94b18015bb08a6a 2024-12-12T20:37:22,860 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 1cf4c430205ee121a94b18015bb08a6a 2024-12-12T20:37:22,871 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; SnapshotRegionProcedure 1cf4c430205ee121a94b18015bb08a6a in 258 msec 2024-12-12T20:37:22,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-12T20:37:23,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-12T20:37:23,257 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917. 
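Note: both SnapshotRegionProcedures have now reported back for the empty snapshot; the SNAPSHOT_CONSOLIDATE_SNAPSHOT and SNAPSHOT_COMPLETE_SNAPSHOT states below merge the per-region manifests and move the snapshot out of .hbase-snapshot/.tmp. Once HBaseAdmin$TableFuture logs the SNAPSHOT operation as completed, it is visible to any client; a quick check, again assuming the same Connection conn:

    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    try (Admin admin = conn.getAdmin()) {
      for (SnapshotDescription sd : admin.listSnapshots()) {
        // expect emptySnaptb0-testExportWithResetTtl on testtb-testExportWithResetTtl
        System.out.println(sd.getName() + " of " + sd.getTableName());
      }
    }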
2024-12-12T20:37:23,257 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=37 2024-12-12T20:37:23,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=37 2024-12-12T20:37:23,257 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region d08da60ed6cea1345519d42919fe2917 2024-12-12T20:37:23,258 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure d08da60ed6cea1345519d42919fe2917 2024-12-12T20:37:23,266 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=35 2024-12-12T20:37:23,266 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T20:37:23,266 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=35, state=SUCCESS; SnapshotRegionProcedure d08da60ed6cea1345519d42919fe2917 in 653 msec 2024-12-12T20:37:23,268 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T20:37:23,270 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T20:37:23,270 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-12-12T20:37:23,272 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-12-12T20:37:23,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741904_1080 (size=543) 2024-12-12T20:37:23,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741904_1080 (size=543) 2024-12-12T20:37:23,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741904_1080 (size=543) 2024-12-12T20:37:23,431 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T20:37:23,479 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): 
pid=35, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T20:37:23,482 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-12-12T20:37:23,494 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T20:37:23,494 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 35 2024-12-12T20:37:23,505 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 943 msec 2024-12-12T20:37:23,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-12T20:37:23,689 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl, procId: 35 completed 2024-12-12T20:37:23,720 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46859 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T20:37:23,729 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36817 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T20:37:23,736 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-12T20:37:23,736 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a. 
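
The HRegion(8254) warnings above record test writes made with the WAL switched off. Purely for orientation, and not taken from this test's source, the following is a minimal sketch of how a client ends up on that warning path: a Put issued with Durability.SKIP_WAL against an assumed standard HBase 2.x Connection named conn (row key, qualifier and value below are likewise assumptions).

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class SkipWalPutSketch {
      // Hypothetical helper: writes one row without WAL protection. An edit like this
      // survives only in the memstore until the next flush, which is what the
      // "Data may be lost in the event of a crash" warning above is pointing out.
      static void putWithoutWal(Connection conn) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("testtb-testExportWithResetTtl"))) {
          Put put = new Put(Bytes.toBytes("row-0"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-0"));
          put.setDurability(Durability.SKIP_WAL); // takes the WAL-disabled write path
          table.put(put);
        }
      }
    }

Skipping the WAL trades durability for write speed, which is why the region server flags that unflushed data could be lost on a crash.
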
2024-12-12T20:37:23,743 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:37:23,747 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0001_000001 (auth:SIMPLE) from 127.0.0.1:58710 2024-12-12T20:37:23,762 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_3/usercache/jenkins/appcache/application_1734035798001_0001/container_1734035798001_0001_01_000001/launch_container.sh] 2024-12-12T20:37:23,762 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_3/usercache/jenkins/appcache/application_1734035798001_0001/container_1734035798001_0001_01_000001/container_tokens] 2024-12-12T20:37:23,762 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_3/usercache/jenkins/appcache/application_1734035798001_0001/container_1734035798001_0001_01_000001/sysfs] 2024-12-12T20:37:23,828 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-12T20:37:23,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734035843828 (current time:1734035843828). 
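
The MasterRpcServices(1703) entry above shows a client-initiated FLUSH snapshot request for testtb-testExportWithResetTtl. As an illustrative sketch only (the connection setup and helper shape are assumptions, not lifted from this test), an equivalent request can be issued through the public Admin API; the call waits while the master drives the SnapshotProcedure states seen in the surrounding log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public final class FlushSnapshotSketch {
      // Requests a FLUSH-type snapshot and waits for the master-side procedure
      // (SNAPSHOT_PREPARE ... SNAPSHOT_POST_OPERATION in the log) to finish.
      static void takeFlushSnapshot() throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.snapshot("snaptb0-testExportWithResetTtl",
              TableName.valueOf("testtb-testExportWithResetTtl"),
              SnapshotType.FLUSH);
        }
      }
    }
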
2024-12-12T20:37:23,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T20:37:23,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-12T20:37:23,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-12T20:37:23,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4fa9acc0 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6fa3e735 2024-12-12T20:37:23,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@157171af, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:37:23,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:37:23,896 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40832, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:37:23,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4fa9acc0 to 127.0.0.1:52655 2024-12-12T20:37:23,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:37:23,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x12856d7d to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6dc238da 2024-12-12T20:37:24,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@720e67f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:37:24,304 DEBUG [hconnection-0xef68583-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:37:24,306 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40834, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:37:24,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x12856d7d to 127.0.0.1:52655 2024-12-12T20:37:24,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:37:24,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-12T20:37:24,323 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-12T20:37:24,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-12T20:37:24,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 38 2024-12-12T20:37:24,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-12T20:37:24,336 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T20:37:24,339 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T20:37:24,350 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T20:37:24,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-12T20:37:24,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741905_1081 (size=156) 2024-12-12T20:37:24,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741905_1081 (size=156) 2024-12-12T20:37:24,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741905_1081 (size=156) 2024-12-12T20:37:24,439 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T20:37:24,439 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 1cf4c430205ee121a94b18015bb08a6a}, {pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure d08da60ed6cea1345519d42919fe2917}] 2024-12-12T20:37:24,441 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure d08da60ed6cea1345519d42919fe2917 2024-12-12T20:37:24,442 INFO [PEWorker-4 {}] 
procedure.MasterProcedureScheduler(786): Took xlock for pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 1cf4c430205ee121a94b18015bb08a6a 2024-12-12T20:37:24,594 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:37:24,595 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46859 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=40 2024-12-12T20:37:24,595 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:37:24,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917. 2024-12-12T20:37:24,596 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36817 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=39 2024-12-12T20:37:24,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a. 2024-12-12T20:37:24,596 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(2837): Flushing d08da60ed6cea1345519d42919fe2917 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-12T20:37:24,596 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(2837): Flushing 1cf4c430205ee121a94b18015bb08a6a 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-12T20:37:24,620 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/d08da60ed6cea1345519d42919fe2917/.tmp/cf/0ddf1a2af1eb4acbbbc8272bf17e9cde is 71, key is 14cb65a62ed19256b096568afbc039ef/cf:q/1734035843720/Put/seqid=0 2024-12-12T20:37:24,620 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/1cf4c430205ee121a94b18015bb08a6a/.tmp/cf/2bc1b8f3b73d4ad685c47fb3bb44c2f7 is 71, key is 084e2b6d45212fae14254b13f7aa61b6/cf:q/1734035843729/Put/seqid=0 2024-12-12T20:37:24,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-12T20:37:24,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741907_1083 (size=5216) 2024-12-12T20:37:24,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741907_1083 (size=5216) 2024-12-12T20:37:24,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to 
blk_1073741907_1083 (size=5216) 2024-12-12T20:37:24,639 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/1cf4c430205ee121a94b18015bb08a6a/.tmp/cf/2bc1b8f3b73d4ad685c47fb3bb44c2f7 2024-12-12T20:37:24,650 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/1cf4c430205ee121a94b18015bb08a6a/.tmp/cf/2bc1b8f3b73d4ad685c47fb3bb44c2f7 as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/1cf4c430205ee121a94b18015bb08a6a/cf/2bc1b8f3b73d4ad685c47fb3bb44c2f7 2024-12-12T20:37:24,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741906_1082 (size=8392) 2024-12-12T20:37:24,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741906_1082 (size=8392) 2024-12-12T20:37:24,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741906_1082 (size=8392) 2024-12-12T20:37:24,676 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/d08da60ed6cea1345519d42919fe2917/.tmp/cf/0ddf1a2af1eb4acbbbc8272bf17e9cde 2024-12-12T20:37:24,678 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/1cf4c430205ee121a94b18015bb08a6a/cf/2bc1b8f3b73d4ad685c47fb3bb44c2f7, entries=2, sequenceid=6, filesize=5.1 K 2024-12-12T20:37:24,679 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(3040): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 1cf4c430205ee121a94b18015bb08a6a in 83ms, sequenceid=6, compaction requested=false 2024-12-12T20:37:24,679 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-12-12T20:37:24,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(2538): Flush status journal for 1cf4c430205ee121a94b18015bb08a6a: 2024-12-12T20:37:24,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a. for snaptb0-testExportWithResetTtl completed. 
2024-12-12T20:37:24,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-12T20:37:24,681 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:37:24,681 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/1cf4c430205ee121a94b18015bb08a6a/cf/2bc1b8f3b73d4ad685c47fb3bb44c2f7] hfiles 2024-12-12T20:37:24,681 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/1cf4c430205ee121a94b18015bb08a6a/cf/2bc1b8f3b73d4ad685c47fb3bb44c2f7 for snapshot=snaptb0-testExportWithResetTtl 2024-12-12T20:37:24,702 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/d08da60ed6cea1345519d42919fe2917/.tmp/cf/0ddf1a2af1eb4acbbbc8272bf17e9cde as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/d08da60ed6cea1345519d42919fe2917/cf/0ddf1a2af1eb4acbbbc8272bf17e9cde 2024-12-12T20:37:24,712 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/d08da60ed6cea1345519d42919fe2917/cf/0ddf1a2af1eb4acbbbc8272bf17e9cde, entries=48, sequenceid=6, filesize=8.2 K 2024-12-12T20:37:24,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741908_1084 (size=107) 2024-12-12T20:37:24,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741908_1084 (size=107) 2024-12-12T20:37:24,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741908_1084 (size=107) 2024-12-12T20:37:24,718 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a. 
2024-12-12T20:37:24,719 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=39 2024-12-12T20:37:24,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=39 2024-12-12T20:37:24,720 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 1cf4c430205ee121a94b18015bb08a6a 2024-12-12T20:37:24,720 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 1cf4c430205ee121a94b18015bb08a6a 2024-12-12T20:37:24,723 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; SnapshotRegionProcedure 1cf4c430205ee121a94b18015bb08a6a in 282 msec 2024-12-12T20:37:24,729 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(3040): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for d08da60ed6cea1345519d42919fe2917 in 131ms, sequenceid=6, compaction requested=false 2024-12-12T20:37:24,729 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(2538): Flush status journal for d08da60ed6cea1345519d42919fe2917: 2024-12-12T20:37:24,729 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917. for snaptb0-testExportWithResetTtl completed. 2024-12-12T20:37:24,729 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-12T20:37:24,730 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:37:24,730 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/d08da60ed6cea1345519d42919fe2917/cf/0ddf1a2af1eb4acbbbc8272bf17e9cde] hfiles 2024-12-12T20:37:24,730 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/d08da60ed6cea1345519d42919fe2917/cf/0ddf1a2af1eb4acbbbc8272bf17e9cde for snapshot=snaptb0-testExportWithResetTtl 2024-12-12T20:37:24,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741909_1085 (size=107) 2024-12-12T20:37:24,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741909_1085 (size=107) 2024-12-12T20:37:24,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741909_1085 (size=107) 2024-12-12T20:37:24,745 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917. 
2024-12-12T20:37:24,745 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=40 2024-12-12T20:37:24,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=40 2024-12-12T20:37:24,746 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region d08da60ed6cea1345519d42919fe2917 2024-12-12T20:37:24,746 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure d08da60ed6cea1345519d42919fe2917 2024-12-12T20:37:24,755 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=38 2024-12-12T20:37:24,755 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=38, state=SUCCESS; SnapshotRegionProcedure d08da60ed6cea1345519d42919fe2917 in 308 msec 2024-12-12T20:37:24,756 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T20:37:24,760 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T20:37:24,761 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T20:37:24,761 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-12-12T20:37:24,762 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-12-12T20:37:24,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741910_1086 (size=621) 2024-12-12T20:37:24,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741910_1086 (size=621) 2024-12-12T20:37:24,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741910_1086 (size=621) 2024-12-12T20:37:24,897 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T20:37:24,905 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=38, 
state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T20:37:24,905 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-12-12T20:37:24,907 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T20:37:24,907 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 38 2024-12-12T20:37:24,909 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 584 msec 2024-12-12T20:37:24,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-12T20:37:24,933 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl, procId: 38 completed 2024-12-12T20:37:24,934 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T20:37:24,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testExportWithResetTtl 2024-12-12T20:37:24,937 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T20:37:24,937 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:37:24,937 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 41 2024-12-12T20:37:24,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-12T20:37:24,938 INFO [PEWorker-4 
{}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T20:37:24,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741911_1087 (size=397) 2024-12-12T20:37:24,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741911_1087 (size=397) 2024-12-12T20:37:24,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741911_1087 (size=397) 2024-12-12T20:37:24,955 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 68b62c0066b785f26c8f5639ed838724, NAME => 'testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:37:24,956 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 28e9de42dab24f1a07f10c9f5a1c3fae, NAME => 'testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:37:24,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741912_1088 (size=58) 2024-12-12T20:37:24,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741912_1088 (size=58) 2024-12-12T20:37:24,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741912_1088 (size=58) 2024-12-12T20:37:24,973 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:37:24,973 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1681): Closing 28e9de42dab24f1a07f10c9f5a1c3fae, disabling compactions & flushes 2024-12-12T20:37:24,974 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1703): Closing region 
testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae. 2024-12-12T20:37:24,974 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae. 2024-12-12T20:37:24,974 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae. after waiting 0 ms 2024-12-12T20:37:24,974 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae. 2024-12-12T20:37:24,974 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1922): Closed testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae. 2024-12-12T20:37:24,974 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1635): Region close journal for 28e9de42dab24f1a07f10c9f5a1c3fae: 2024-12-12T20:37:24,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741913_1089 (size=58) 2024-12-12T20:37:24,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741913_1089 (size=58) 2024-12-12T20:37:24,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741913_1089 (size=58) 2024-12-12T20:37:24,994 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:37:24,994 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1681): Closing 68b62c0066b785f26c8f5639ed838724, disabling compactions & flushes 2024-12-12T20:37:24,994 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724. 2024-12-12T20:37:24,994 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724. 2024-12-12T20:37:24,994 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724. after waiting 0 ms 2024-12-12T20:37:24,994 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724. 2024-12-12T20:37:24,994 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1922): Closed testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724. 
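
The HMaster$4(2389) entry earlier records the creation of 'testExportWithResetTtl' with a single 'cf' family, and the RegionOpenAndInit entries show two regions with a boundary at '1'. The sketch below reconstructs a roughly equivalent request with the HBase 2.x descriptor builders; the split point and the helper itself are inferred from the logged STARTKEY/ENDKEY values and should be read as assumptions rather than the test's actual code.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateExportTableSketch {
      // Builds a descriptor with one 'cf' family (VERSIONS => '1' as in the logged
      // descriptor) and a single split point at '1', yielding the two regions
      // ('' -> '1' and '1' -> '') that the RegionOpenAndInit pool initialized above.
      static void createExportTable(Admin admin) throws Exception {
        TableDescriptorBuilder table =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("testExportWithResetTtl"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                    .setMaxVersions(1)
                    .build());
        admin.createTable(table.build(), new byte[][] { Bytes.toBytes("1") });
      }
    }
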
2024-12-12T20:37:24,994 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1635): Region close journal for 68b62c0066b785f26c8f5639ed838724: 2024-12-12T20:37:24,996 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T20:37:24,997 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1734035844996"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734035844996"}]},"ts":"1734035844996"} 2024-12-12T20:37:24,997 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1734035844996"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734035844996"}]},"ts":"1734035844996"} 2024-12-12T20:37:25,000 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-12T20:37:25,002 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T20:37:25,002 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035845002"}]},"ts":"1734035845002"} 2024-12-12T20:37:25,004 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-12T20:37:25,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-12T20:37:25,168 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T20:37:25,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-12T20:37:25,471 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {0a737713f6e3=0} racks are {/default-rack=0} 2024-12-12T20:37:25,472 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-12T20:37:25,472 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-12T20:37:25,472 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-12T20:37:25,472 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-12T20:37:25,473 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-12T20:37:25,473 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-12T20:37:25,473 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T20:37:25,473 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, 
region=68b62c0066b785f26c8f5639ed838724, ASSIGN}, {pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=28e9de42dab24f1a07f10c9f5a1c3fae, ASSIGN}] 2024-12-12T20:37:25,474 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=28e9de42dab24f1a07f10c9f5a1c3fae, ASSIGN 2024-12-12T20:37:25,474 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=68b62c0066b785f26c8f5639ed838724, ASSIGN 2024-12-12T20:37:25,475 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=28e9de42dab24f1a07f10c9f5a1c3fae, ASSIGN; state=OFFLINE, location=0a737713f6e3,36817,1734035782356; forceNewPlan=false, retain=false 2024-12-12T20:37:25,475 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=68b62c0066b785f26c8f5639ed838724, ASSIGN; state=OFFLINE, location=0a737713f6e3,42407,1734035781742; forceNewPlan=false, retain=false 2024-12-12T20:37:25,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-12T20:37:25,625 INFO [0a737713f6e3:43067 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-12T20:37:25,626 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=68b62c0066b785f26c8f5639ed838724, regionState=OPENING, regionLocation=0a737713f6e3,42407,1734035781742 2024-12-12T20:37:25,626 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=43 updating hbase:meta row=28e9de42dab24f1a07f10c9f5a1c3fae, regionState=OPENING, regionLocation=0a737713f6e3,36817,1734035782356 2024-12-12T20:37:25,628 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=42, state=RUNNABLE; OpenRegionProcedure 68b62c0066b785f26c8f5639ed838724, server=0a737713f6e3,42407,1734035781742}] 2024-12-12T20:37:25,630 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=43, state=RUNNABLE; OpenRegionProcedure 28e9de42dab24f1a07f10c9f5a1c3fae, server=0a737713f6e3,36817,1734035782356}] 2024-12-12T20:37:25,781 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:37:25,784 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:37:25,791 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(135): Open testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724. 
2024-12-12T20:37:25,791 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7285): Opening region: {ENCODED => 68b62c0066b785f26c8f5639ed838724, NAME => 'testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724.', STARTKEY => '', ENDKEY => '1'} 2024-12-12T20:37:25,792 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724. service=AccessControlService 2024-12-12T20:37:25,792 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T20:37:25,792 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 68b62c0066b785f26c8f5639ed838724 2024-12-12T20:37:25,792 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] handler.AssignRegionHandler(135): Open testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae. 2024-12-12T20:37:25,792 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:37:25,792 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7327): checking encryption for 68b62c0066b785f26c8f5639ed838724 2024-12-12T20:37:25,792 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7285): Opening region: {ENCODED => 28e9de42dab24f1a07f10c9f5a1c3fae, NAME => 'testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae.', STARTKEY => '1', ENDKEY => ''} 2024-12-12T20:37:25,793 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7330): checking classloading for 68b62c0066b785f26c8f5639ed838724 2024-12-12T20:37:25,793 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae. service=AccessControlService 2024-12-12T20:37:25,793 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-12T20:37:25,793 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 28e9de42dab24f1a07f10c9f5a1c3fae 2024-12-12T20:37:25,793 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:37:25,793 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7327): checking encryption for 28e9de42dab24f1a07f10c9f5a1c3fae 2024-12-12T20:37:25,793 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7330): checking classloading for 28e9de42dab24f1a07f10c9f5a1c3fae 2024-12-12T20:37:25,799 INFO [StoreOpener-68b62c0066b785f26c8f5639ed838724-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 68b62c0066b785f26c8f5639ed838724 2024-12-12T20:37:25,804 INFO [StoreOpener-68b62c0066b785f26c8f5639ed838724-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 68b62c0066b785f26c8f5639ed838724 columnFamilyName cf 2024-12-12T20:37:25,805 DEBUG [StoreOpener-68b62c0066b785f26c8f5639ed838724-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:37:25,812 INFO [StoreOpener-68b62c0066b785f26c8f5639ed838724-1 {}] regionserver.HStore(327): Store=68b62c0066b785f26c8f5639ed838724/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:37:25,814 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/68b62c0066b785f26c8f5639ed838724 2024-12-12T20:37:25,814 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/68b62c0066b785f26c8f5639ed838724 2024-12-12T20:37:25,818 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1085): writing seq id for 
68b62c0066b785f26c8f5639ed838724 2024-12-12T20:37:25,823 INFO [StoreOpener-28e9de42dab24f1a07f10c9f5a1c3fae-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 28e9de42dab24f1a07f10c9f5a1c3fae 2024-12-12T20:37:25,827 INFO [StoreOpener-28e9de42dab24f1a07f10c9f5a1c3fae-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 28e9de42dab24f1a07f10c9f5a1c3fae columnFamilyName cf 2024-12-12T20:37:25,828 DEBUG [StoreOpener-28e9de42dab24f1a07f10c9f5a1c3fae-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:37:25,830 INFO [StoreOpener-28e9de42dab24f1a07f10c9f5a1c3fae-1 {}] regionserver.HStore(327): Store=28e9de42dab24f1a07f10c9f5a1c3fae/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:37:25,832 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/28e9de42dab24f1a07f10c9f5a1c3fae 2024-12-12T20:37:25,833 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/28e9de42dab24f1a07f10c9f5a1c3fae 2024-12-12T20:37:25,838 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1085): writing seq id for 28e9de42dab24f1a07f10c9f5a1c3fae 2024-12-12T20:37:25,846 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/68b62c0066b785f26c8f5639ed838724/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:37:25,847 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1102): Opened 68b62c0066b785f26c8f5639ed838724; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71918495, jitterRate=0.0716690868139267}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:37:25,848 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1001): Region open journal for 
68b62c0066b785f26c8f5639ed838724: 2024-12-12T20:37:25,850 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724., pid=44, masterSystemTime=1734035845780 2024-12-12T20:37:25,852 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/28e9de42dab24f1a07f10c9f5a1c3fae/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:37:25,853 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1102): Opened 28e9de42dab24f1a07f10c9f5a1c3fae; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64014109, jitterRate=-0.046115443110466}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:37:25,853 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1001): Region open journal for 28e9de42dab24f1a07f10c9f5a1c3fae: 2024-12-12T20:37:25,854 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae., pid=45, masterSystemTime=1734035845784 2024-12-12T20:37:25,857 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724. 2024-12-12T20:37:25,857 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(164): Opened testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724. 2024-12-12T20:37:25,858 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=68b62c0066b785f26c8f5639ed838724, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,42407,1734035781742 2024-12-12T20:37:25,859 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae. 2024-12-12T20:37:25,859 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] handler.AssignRegionHandler(164): Opened testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae. 
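The entries above trace the two OpenRegionProcedures (pid=44 and pid=45) that bring the two regions of testExportWithResetTtl online: store opener, recovered-edits scan, seqid file write, region open journal, and the post-open deploy tasks. A minimal sketch of the kind of test-side call that produces such a pre-split, single-family table follows; the table name, the "cf" family and the "1" split key come from the log, while the use of HBaseTestingUtility, the mini-cluster setup, the variable names and the exact createTable overload are assumptions.

// Minimal sketch (not the actual test code): creates a table shaped like the one whose
// regions are opening above -- name testExportWithResetTtl, one family "cf", pre-split
// at row "1" so it gets the two regions ("", "1") and ("1", "").
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CreatePreSplitTableSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();                               // mini cluster like the one in this log
    TableName tn = TableName.valueOf("testExportWithResetTtl");
    byte[][] splitKeys = { Bytes.toBytes("1") };           // second region starts at "1"
    Table table = util.createTable(tn, Bytes.toBytes("cf"), splitKeys);
    util.waitUntilAllRegionsAssigned(tn);                  // same wait the log reports further down
    // ... load rows, snapshot, export (see sketches further down) ...
    table.close();
    util.shutdownMiniCluster();
  }
}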
2024-12-12T20:37:25,860 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=43 updating hbase:meta row=28e9de42dab24f1a07f10c9f5a1c3fae, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,36817,1734035782356 2024-12-12T20:37:25,872 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=42 2024-12-12T20:37:25,874 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=42, state=SUCCESS; OpenRegionProcedure 68b62c0066b785f26c8f5639ed838724, server=0a737713f6e3,42407,1734035781742 in 233 msec 2024-12-12T20:37:25,874 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=43 2024-12-12T20:37:25,876 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=68b62c0066b785f26c8f5639ed838724, ASSIGN in 399 msec 2024-12-12T20:37:25,876 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=43, state=SUCCESS; OpenRegionProcedure 28e9de42dab24f1a07f10c9f5a1c3fae, server=0a737713f6e3,36817,1734035782356 in 232 msec 2024-12-12T20:37:25,880 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=41 2024-12-12T20:37:25,881 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=28e9de42dab24f1a07f10c9f5a1c3fae, ASSIGN in 401 msec 2024-12-12T20:37:25,882 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T20:37:25,882 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035845882"}]},"ts":"1734035845882"} 2024-12-12T20:37:25,885 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-12T20:37:25,953 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-12-12T20:37:26,025 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T20:37:26,025 DEBUG [PEWorker-4 {}] access.PermissionStorage(175): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-12-12T20:37:26,028 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46859 {}] access.PermissionStorage(611): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-12T20:37:26,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-12T20:37:26,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:37:26,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:37:26,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:37:26,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:37:26,312 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T20:37:26,312 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T20:37:26,313 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T20:37:26,313 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T20:37:26,314 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T20:37:26,314 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T20:37:26,314 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T20:37:26,315 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T20:37:26,320 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; CreateTableProcedure table=testExportWithResetTtl in 1.3780 sec 2024-12-12T20:37:27,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-12T20:37:27,048 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testExportWithResetTtl, procId: 41 completed 2024-12-12T20:37:27,048 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testExportWithResetTtl get assigned. 
Timeout = 60000ms 2024-12-12T20:37:27,049 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:37:27,055 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-12T20:37:27,055 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:37:27,055 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testExportWithResetTtl assigned. 2024-12-12T20:37:27,108 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42407 {}] regionserver.HRegion(8254): writing data to region testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T20:37:27,126 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36817 {}] regionserver.HRegion(8254): writing data to region testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T20:37:27,149 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testExportWithResetTtl 2024-12-12T20:37:27,150 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724. 2024-12-12T20:37:27,150 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:37:27,223 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-12T20:37:27,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734035847223 (current time:1734035847223). 
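The last entries above show the master receiving the snapshot request { ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } and filling in the unspecified creation time. A minimal client-side sketch of issuing such a FLUSH snapshot with a TTL follows; it assumes the snapshot-properties constructor of SnapshotDescription and the "TTL" property key (the same key the HBase shell's {TTL => ...} option maps to), and the connection boilerplate and variable names are illustrative. The entries that follow (VERSION defaulted to 2, owner set to jenkins, SnapshotProcedure pid=46) are the master-side handling of such a call.

import java.util.Collections;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotWithTtlSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // TTL passed via the snapshot properties map; the "TTL" key is an assumption
      // (the value matches the ttl=100000 shown in the request above).
      Map<String, Object> props = Collections.singletonMap("TTL", 100000L);
      SnapshotDescription desc = new SnapshotDescription(
          "snaptb-testExportWithResetTtl",
          TableName.valueOf("testExportWithResetTtl"),
          SnapshotType.FLUSH,
          props);
      // Owner, creation time and manifest version are left for the master to fill in,
      // which is what the DEBUG lines around this point show.
      admin.snapshot(desc);   // blocks until the SnapshotProcedure below completes
    }
  }
}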
2024-12-12T20:37:27,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-12T20:37:27,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-12T20:37:27,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4c4a0d2c to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@652cff8a 2024-12-12T20:37:27,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b9b921e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:37:27,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:37:27,247 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40836, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:37:27,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4c4a0d2c to 127.0.0.1:52655 2024-12-12T20:37:27,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:37:27,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x41249566 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4376f267 2024-12-12T20:37:27,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@546a9375, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:37:27,292 DEBUG [hconnection-0x5776669c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:37:27,294 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40838, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:37:27,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x41249566 to 127.0.0.1:52655 2024-12-12T20:37:27,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:37:27,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(611): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-12T20:37:27,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-12T20:37:27,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=46, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-12T20:37:27,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 46 2024-12-12T20:37:27,315 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-12-12T20:37:27,317 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T20:37:27,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-12T20:37:27,325 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T20:37:27,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-12T20:37:27,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741914_1090 (size=143) 2024-12-12T20:37:27,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741914_1090 (size=143) 2024-12-12T20:37:27,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741914_1090 (size=143) 2024-12-12T20:37:27,431 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T20:37:27,431 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 68b62c0066b785f26c8f5639ed838724}, {pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 28e9de42dab24f1a07f10c9f5a1c3fae}] 2024-12-12T20:37:27,433 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 28e9de42dab24f1a07f10c9f5a1c3fae 2024-12-12T20:37:27,433 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 68b62c0066b785f26c8f5639ed838724 2024-12-12T20:37:27,586 DEBUG 
[RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:37:27,586 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:37:27,587 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36817 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=48 2024-12-12T20:37:27,588 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae. 2024-12-12T20:37:27,589 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 28e9de42dab24f1a07f10c9f5a1c3fae 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-12T20:37:27,590 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42407 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=47 2024-12-12T20:37:27,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724. 2024-12-12T20:37:27,608 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(2837): Flushing 68b62c0066b785f26c8f5639ed838724 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-12T20:37:27,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-12T20:37:27,639 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/68b62c0066b785f26c8f5639ed838724/.tmp/cf/f594e9df5a994858950c9ec54e9f8308 is 71, key is 0ed34627357fda23044dab12c4407148/cf:q/1734035847108/Put/seqid=0 2024-12-12T20:37:27,643 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/28e9de42dab24f1a07f10c9f5a1c3fae/.tmp/cf/411ff41b23014525a27a018df367261f is 71, key is 197cb90321456245d832587a2e329d60/cf:q/1734035847126/Put/seqid=0 2024-12-12T20:37:27,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741915_1091 (size=8394) 2024-12-12T20:37:27,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741915_1091 (size=8394) 2024-12-12T20:37:27,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741915_1091 (size=8394) 2024-12-12T20:37:27,707 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=3.13 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/28e9de42dab24f1a07f10c9f5a1c3fae/.tmp/cf/411ff41b23014525a27a018df367261f 2024-12-12T20:37:27,729 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/28e9de42dab24f1a07f10c9f5a1c3fae/.tmp/cf/411ff41b23014525a27a018df367261f as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/28e9de42dab24f1a07f10c9f5a1c3fae/cf/411ff41b23014525a27a018df367261f 2024-12-12T20:37:27,734 DEBUG [master/0a737713f6e3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 28e9de42dab24f1a07f10c9f5a1c3fae changed from -1.0 to 0.0, refreshing cache 2024-12-12T20:37:27,748 DEBUG [master/0a737713f6e3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 1cf4c430205ee121a94b18015bb08a6a changed from -1.0 to 0.0, refreshing cache 2024-12-12T20:37:27,748 DEBUG [master/0a737713f6e3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 68b62c0066b785f26c8f5639ed838724 changed from -1.0 to 0.0, refreshing cache 2024-12-12T20:37:27,749 DEBUG [master/0a737713f6e3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 039d8ca4d0468f0362e2baff27ee7042 changed from -1.0 to 0.0, refreshing cache 2024-12-12T20:37:27,749 DEBUG [master/0a737713f6e3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 419ba590b58c1bffeff3e689922ee9a3 changed from -1.0 to 0.0, refreshing cache 2024-12-12T20:37:27,749 DEBUG [master/0a737713f6e3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region d08da60ed6cea1345519d42919fe2917 changed from -1.0 to 0.0, refreshing cache 2024-12-12T20:37:27,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741916_1092 (size=5216) 2024-12-12T20:37:27,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741916_1092 (size=5216) 2024-12-12T20:37:27,767 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/68b62c0066b785f26c8f5639ed838724/.tmp/cf/f594e9df5a994858950c9ec54e9f8308 2024-12-12T20:37:27,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741916_1092 (size=5216) 2024-12-12T20:37:27,774 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/28e9de42dab24f1a07f10c9f5a1c3fae/cf/411ff41b23014525a27a018df367261f, entries=48, sequenceid=5, filesize=8.2 K 2024-12-12T20:37:27,777 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~3.13 KB/3204, heapSize 
~6.98 KB/7152, currentSize=0 B/0 for 28e9de42dab24f1a07f10c9f5a1c3fae in 189ms, sequenceid=5, compaction requested=false 2024-12-12T20:37:27,777 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 28e9de42dab24f1a07f10c9f5a1c3fae: 2024-12-12T20:37:27,777 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae. for snaptb-testExportWithResetTtl completed. 2024-12-12T20:37:27,777 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae.' region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-12T20:37:27,777 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:37:27,778 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/28e9de42dab24f1a07f10c9f5a1c3fae/cf/411ff41b23014525a27a018df367261f] hfiles 2024-12-12T20:37:27,778 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/28e9de42dab24f1a07f10c9f5a1c3fae/cf/411ff41b23014525a27a018df367261f for snapshot=snaptb-testExportWithResetTtl 2024-12-12T20:37:27,783 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/68b62c0066b785f26c8f5639ed838724/.tmp/cf/f594e9df5a994858950c9ec54e9f8308 as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/68b62c0066b785f26c8f5639ed838724/cf/f594e9df5a994858950c9ec54e9f8308 2024-12-12T20:37:27,799 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/68b62c0066b785f26c8f5639ed838724/cf/f594e9df5a994858950c9ec54e9f8308, entries=2, sequenceid=5, filesize=5.1 K 2024-12-12T20:37:27,804 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(3040): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 68b62c0066b785f26c8f5639ed838724 in 196ms, sequenceid=5, compaction requested=false 2024-12-12T20:37:27,804 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(2538): Flush status journal for 68b62c0066b785f26c8f5639ed838724: 2024-12-12T20:37:27,804 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] 
regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724. for snaptb-testExportWithResetTtl completed. 2024-12-12T20:37:27,804 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724.' region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-12T20:37:27,805 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:37:27,805 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/68b62c0066b785f26c8f5639ed838724/cf/f594e9df5a994858950c9ec54e9f8308] hfiles 2024-12-12T20:37:27,805 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/68b62c0066b785f26c8f5639ed838724/cf/f594e9df5a994858950c9ec54e9f8308 for snapshot=snaptb-testExportWithResetTtl 2024-12-12T20:37:27,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741917_1093 (size=100) 2024-12-12T20:37:27,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741917_1093 (size=100) 2024-12-12T20:37:27,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741917_1093 (size=100) 2024-12-12T20:37:27,858 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae. 
2024-12-12T20:37:27,858 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-12-12T20:37:27,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-12-12T20:37:27,859 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 28e9de42dab24f1a07f10c9f5a1c3fae 2024-12-12T20:37:27,859 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 28e9de42dab24f1a07f10c9f5a1c3fae 2024-12-12T20:37:27,864 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=46, state=SUCCESS; SnapshotRegionProcedure 28e9de42dab24f1a07f10c9f5a1c3fae in 431 msec 2024-12-12T20:37:27,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741918_1094 (size=100) 2024-12-12T20:37:27,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741918_1094 (size=100) 2024-12-12T20:37:27,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741918_1094 (size=100) 2024-12-12T20:37:27,882 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724. 2024-12-12T20:37:27,882 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=47 2024-12-12T20:37:27,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=47 2024-12-12T20:37:27,883 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 68b62c0066b785f26c8f5639ed838724 2024-12-12T20:37:27,883 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 68b62c0066b785f26c8f5639ed838724 2024-12-12T20:37:27,891 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=47, resume processing ppid=46 2024-12-12T20:37:27,891 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T20:37:27,891 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, ppid=46, state=SUCCESS; SnapshotRegionProcedure 68b62c0066b785f26c8f5639ed838724 in 454 msec 2024-12-12T20:37:27,892 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute 
state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T20:37:27,897 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T20:37:27,897 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-12-12T20:37:27,898 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-12T20:37:27,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-12T20:37:27,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741919_1095 (size=600) 2024-12-12T20:37:27,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741919_1095 (size=600) 2024-12-12T20:37:27,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741919_1095 (size=600) 2024-12-12T20:37:27,987 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T20:37:28,015 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T20:37:28,016 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-12T20:37:28,021 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T20:37:28,022 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 46 2024-12-12T20:37:28,030 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 710 msec 2024-12-12T20:37:28,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=46 2024-12-12T20:37:28,426 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl, procId: 46 completed 2024-12-12T20:37:28,438 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035848438 2024-12-12T20:37:28,438 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:37615, tgtDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035848438, rawTgtDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035848438, srcFsUri=hdfs://localhost:37615, srcDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:37:28,476 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:37615, inputRoot=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:37:28,476 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1165987477_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035848438, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035848438/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-12T20:37:28,479 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 
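After the snapshot completes, TestExportSnapshot points ExportSnapshot at the source root and the freshly created destination under export-test/export-1734035848438, and the tool first verifies the source snapshot. A rough sketch of driving ExportSnapshot the same way follows; the ToolRunner wiring and argument list are assumptions, and in particular the reset-ttl option name (the behaviour this "WithResetTtl" variant presumably exercises) is an assumed spelling that the log does not confirm.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copies the snapshot manifest and referenced hfiles from the source root to the
    // export destination, as the log shows next. "-snapshot" and "-copy-to" are the
    // standard ExportSnapshot arguments; "-reset-ttl" is an ASSUMED option name for
    // the reset-TTL behaviour this test variant covers.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb-testExportWithResetTtl",
        "-copy-to",
        "hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035848438",
        "-reset-ttl"
    });
    System.exit(rc);
  }
}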
2024-12-12T20:37:28,501 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035848438/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-12T20:37:28,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741921_1097 (size=600) 2024-12-12T20:37:28,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741921_1097 (size=600) 2024-12-12T20:37:28,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741921_1097 (size=600) 2024-12-12T20:37:28,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741920_1096 (size=143) 2024-12-12T20:37:28,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741920_1096 (size=143) 2024-12-12T20:37:28,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741920_1096 (size=143) 2024-12-12T20:37:28,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741922_1098 (size=141) 2024-12-12T20:37:28,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741922_1098 (size=141) 2024-12-12T20:37:28,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741922_1098 (size=141) 2024-12-12T20:37:28,622 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-12T20:37:28,623 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-12T20:37:28,623 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-12T20:37:28,623 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-12T20:37:29,837 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop-2826847624661593407.jar 2024-12-12T20:37:29,838 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, 
using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-12T20:37:29,838 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-12T20:37:29,967 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop-2788216942749049417.jar 2024-12-12T20:37:29,968 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-12T20:37:29,968 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-12T20:37:29,969 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-12T20:37:29,969 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-12T20:37:29,969 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-12T20:37:29,970 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-12T20:37:29,970 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-12T20:37:29,970 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-12T20:37:29,971 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-12T20:37:29,971 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-12T20:37:29,971 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-12T20:37:29,972 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-12T20:37:29,972 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-12T20:37:29,972 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-12T20:37:29,973 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-12T20:37:29,973 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-12T20:37:29,973 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-12T20:37:29,973 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-12T20:37:29,974 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:37:29,974 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:37:29,975 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:37:29,975 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:37:29,976 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:37:29,976 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:37:29,976 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:37:30,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741923_1099 (size=127628) 2024-12-12T20:37:30,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741923_1099 (size=127628) 2024-12-12T20:37:30,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741923_1099 (size=127628) 2024-12-12T20:37:30,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741924_1100 (size=2172101) 2024-12-12T20:37:30,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741924_1100 (size=2172101) 2024-12-12T20:37:30,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741924_1100 (size=2172101) 2024-12-12T20:37:30,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741925_1101 (size=213228) 2024-12-12T20:37:30,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741925_1101 (size=213228) 2024-12-12T20:37:30,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741925_1101 (size=213228) 2024-12-12T20:37:30,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741926_1102 (size=1877034) 2024-12-12T20:37:30,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741926_1102 (size=1877034) 2024-12-12T20:37:30,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741926_1102 (size=1877034) 2024-12-12T20:37:31,305 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 
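The long run of "For class ..., using jar ..." DEBUG entries above is TableMapReduceUtil resolving, for every class the export job needs, the jar that provides it so those jars can be shipped with the MapReduce job; the addStoredBlock entries that follow are those jars being copied into HDFS. A minimal sketch of the kind of call that triggers this resolution follows; the Job setup and job name are illustrative, and ExportSnapshot may use a related TableMapReduceUtil entry point rather than exactly this one.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-snapshot-sketch");   // job name is illustrative
    // Resolves the containing jar for each class the job depends on (HBase modules,
    // ZooKeeper, protobuf, OpenTelemetry, Hadoop, ...) and adds them to the job's
    // distributed-cache jar list -- the resolution step that logs
    // "For class X, using jar Y" above.
    TableMapReduceUtil.addDependencyJars(job);
  }
}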
2024-12-12T20:37:31,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741927_1103 (size=533455) 2024-12-12T20:37:31,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741927_1103 (size=533455) 2024-12-12T20:37:31,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741927_1103 (size=533455) 2024-12-12T20:37:31,391 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-12T20:37:31,391 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-12T20:37:31,392 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-12T20:37:31,392 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-12T20:37:31,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741928_1104 (size=7280644) 2024-12-12T20:37:31,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741928_1104 (size=7280644) 2024-12-12T20:37:31,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741928_1104 (size=7280644) 2024-12-12T20:37:31,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741929_1105 (size=4188619) 2024-12-12T20:37:31,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741929_1105 (size=4188619) 2024-12-12T20:37:31,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741929_1105 (size=4188619) 2024-12-12T20:37:32,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741930_1106 (size=20406) 2024-12-12T20:37:32,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741930_1106 (size=20406) 2024-12-12T20:37:32,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741930_1106 (size=20406) 2024-12-12T20:37:32,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741931_1107 (size=75495) 2024-12-12T20:37:32,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741931_1107 (size=75495) 2024-12-12T20:37:32,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to 
blk_1073741931_1107 (size=75495) 2024-12-12T20:37:32,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741932_1108 (size=45609) 2024-12-12T20:37:32,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741932_1108 (size=45609) 2024-12-12T20:37:32,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741932_1108 (size=45609) 2024-12-12T20:37:32,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741933_1109 (size=110084) 2024-12-12T20:37:32,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741933_1109 (size=110084) 2024-12-12T20:37:32,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741933_1109 (size=110084) 2024-12-12T20:37:32,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741934_1110 (size=1323991) 2024-12-12T20:37:32,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741934_1110 (size=1323991) 2024-12-12T20:37:32,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741934_1110 (size=1323991) 2024-12-12T20:37:32,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741935_1111 (size=23076) 2024-12-12T20:37:32,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741935_1111 (size=23076) 2024-12-12T20:37:32,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741935_1111 (size=23076) 2024-12-12T20:37:32,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741936_1112 (size=126803) 2024-12-12T20:37:32,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741936_1112 (size=126803) 2024-12-12T20:37:32,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741936_1112 (size=126803) 2024-12-12T20:37:33,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741937_1113 (size=322274) 2024-12-12T20:37:33,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741937_1113 (size=322274) 2024-12-12T20:37:33,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741937_1113 (size=322274) 2024-12-12T20:37:33,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741938_1114 (size=1832290) 2024-12-12T20:37:33,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added 
to blk_1073741938_1114 (size=1832290) 2024-12-12T20:37:33,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741938_1114 (size=1832290) 2024-12-12T20:37:33,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741939_1115 (size=6350857) 2024-12-12T20:37:33,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741939_1115 (size=6350857) 2024-12-12T20:37:33,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741939_1115 (size=6350857) 2024-12-12T20:37:33,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741940_1116 (size=30081) 2024-12-12T20:37:33,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741940_1116 (size=30081) 2024-12-12T20:37:33,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741940_1116 (size=30081) 2024-12-12T20:37:33,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741941_1117 (size=53616) 2024-12-12T20:37:33,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741941_1117 (size=53616) 2024-12-12T20:37:33,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741941_1117 (size=53616) 2024-12-12T20:37:33,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741942_1118 (size=451756) 2024-12-12T20:37:33,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741942_1118 (size=451756) 2024-12-12T20:37:33,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741942_1118 (size=451756) 2024-12-12T20:37:33,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741943_1119 (size=29229) 2024-12-12T20:37:33,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741943_1119 (size=29229) 2024-12-12T20:37:33,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741943_1119 (size=29229) 2024-12-12T20:37:33,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741944_1120 (size=169089) 2024-12-12T20:37:33,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741944_1120 (size=169089) 2024-12-12T20:37:33,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741944_1120 (size=169089) 2024-12-12T20:37:33,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is 
added to blk_1073741945_1121 (size=5175431) 2024-12-12T20:37:33,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741945_1121 (size=5175431) 2024-12-12T20:37:33,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741945_1121 (size=5175431) 2024-12-12T20:37:34,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741946_1122 (size=136454) 2024-12-12T20:37:34,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741946_1122 (size=136454) 2024-12-12T20:37:34,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741946_1122 (size=136454) 2024-12-12T20:37:34,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741947_1123 (size=907861) 2024-12-12T20:37:34,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741947_1123 (size=907861) 2024-12-12T20:37:34,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741947_1123 (size=907861) 2024-12-12T20:37:34,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741948_1124 (size=3317408) 2024-12-12T20:37:34,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741948_1124 (size=3317408) 2024-12-12T20:37:34,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741948_1124 (size=3317408) 2024-12-12T20:37:34,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741949_1125 (size=503880) 2024-12-12T20:37:34,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741949_1125 (size=503880) 2024-12-12T20:37:34,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741949_1125 (size=503880) 2024-12-12T20:37:34,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741950_1126 (size=4695811) 2024-12-12T20:37:34,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741950_1126 (size=4695811) 2024-12-12T20:37:34,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741950_1126 (size=4695811) 2024-12-12T20:37:34,894 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-12T20:37:34,898 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-12-12T20:37:34,902 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-12T20:37:34,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741951_1127 (size=324) 2024-12-12T20:37:34,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741951_1127 (size=324) 2024-12-12T20:37:34,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741951_1127 (size=324) 2024-12-12T20:37:34,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741952_1128 (size=15) 2024-12-12T20:37:34,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741952_1128 (size=15) 2024-12-12T20:37:34,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741952_1128 (size=15) 2024-12-12T20:37:35,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741953_1129 (size=304877) 2024-12-12T20:37:35,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741953_1129 (size=304877) 2024-12-12T20:37:35,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741953_1129 (size=304877) 2024-12-12T20:37:35,471 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T20:37:35,471 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T20:37:35,765 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0002_000001 (auth:SIMPLE) from 127.0.0.1:38590 2024-12-12T20:37:49,297 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
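The "Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list" and "export split=..." entries above mark the start of the ExportSnapshot MapReduce job. A minimal sketch of how the tool is typically driven, assuming the standard branch-2 option names (--snapshot, --copy-to) and using a placeholder destination URI rather than the test's actual export path:

    // Sketch only: runs the ExportSnapshot tool programmatically via ToolRunner.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "--snapshot", "snaptb-testExportWithResetTtl",
            "--copy-to", "hdfs://namenode:8020/export-test/target"  // placeholder URI
        });
        System.exit(rc);
      }
    }

In this run the destination is a path under export-test/ on the same MiniDFSCluster, as the verification entries further down show.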
2024-12-12T20:37:50,390 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0002_000001 (auth:SIMPLE) from 127.0.0.1:56274 2024-12-12T20:37:51,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741954_1130 (size=350551) 2024-12-12T20:37:51,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741954_1130 (size=350551) 2024-12-12T20:37:51,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741954_1130 (size=350551) 2024-12-12T20:37:53,199 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0002_000001 (auth:SIMPLE) from 127.0.0.1:37428 2024-12-12T20:38:02,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741955_1131 (size=8394) 2024-12-12T20:38:02,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741955_1131 (size=8394) 2024-12-12T20:38:02,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741955_1131 (size=8394) 2024-12-12T20:38:03,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741956_1132 (size=5216) 2024-12-12T20:38:03,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741956_1132 (size=5216) 2024-12-12T20:38:03,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741956_1132 (size=5216) 2024-12-12T20:38:03,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741957_1133 (size=17402) 2024-12-12T20:38:03,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741957_1133 (size=17402) 2024-12-12T20:38:03,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741957_1133 (size=17402) 2024-12-12T20:38:03,636 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_2/usercache/jenkins/appcache/application_1734035798001_0002/container_1734035798001_0002_01_000002/launch_container.sh] 2024-12-12T20:38:03,636 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_2/usercache/jenkins/appcache/application_1734035798001_0002/container_1734035798001_0002_01_000002/container_tokens] 2024-12-12T20:38:03,637 WARN [ContainersLauncher #1 
{}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_2/usercache/jenkins/appcache/application_1734035798001_0002/container_1734035798001_0002_01_000002/sysfs] 2024-12-12T20:38:04,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741958_1134 (size=462) 2024-12-12T20:38:04,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741958_1134 (size=462) 2024-12-12T20:38:04,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741958_1134 (size=462) 2024-12-12T20:38:04,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741959_1135 (size=17402) 2024-12-12T20:38:04,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741959_1135 (size=17402) 2024-12-12T20:38:04,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741959_1135 (size=17402) 2024-12-12T20:38:04,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741960_1136 (size=350551) 2024-12-12T20:38:04,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741960_1136 (size=350551) 2024-12-12T20:38:04,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741960_1136 (size=350551) 2024-12-12T20:38:06,215 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1cf4c430205ee121a94b18015bb08a6a, had cached 0 bytes from a total of 5216 2024-12-12T20:38:06,226 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region d08da60ed6cea1345519d42919fe2917, had cached 0 bytes from a total of 8392 2024-12-12T20:38:06,277 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-12T20:38:06,288 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
2024-12-12T20:38:06,335 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb-testExportWithResetTtl 2024-12-12T20:38:06,335 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-12T20:38:06,350 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-12T20:38:06,350 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1165987477_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-12T20:38:06,354 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-12T20:38:06,354 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-12T20:38:06,354 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1165987477_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035848438/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035848438/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-12T20:38:06,356 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035848438/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-12T20:38:06,356 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035848438/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-12T20:38:06,411 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testExportWithResetTtl 2024-12-12T20:38:06,412 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-12-12T20:38:06,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testExportWithResetTtl 2024-12-12T20:38:06,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-12T20:38:06,440 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035886439"}]},"ts":"1734035886439"} 2024-12-12T20:38:06,450 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-12T20:38:06,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=49 2024-12-12T20:38:06,682 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-12-12T20:38:06,684 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-12-12T20:38:06,698 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=68b62c0066b785f26c8f5639ed838724, UNASSIGN}, {pid=52, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=28e9de42dab24f1a07f10c9f5a1c3fae, UNASSIGN}] 2024-12-12T20:38:06,708 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=52, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=28e9de42dab24f1a07f10c9f5a1c3fae, UNASSIGN 2024-12-12T20:38:06,709 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=51, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=68b62c0066b785f26c8f5639ed838724, UNASSIGN 2024-12-12T20:38:06,710 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=52 updating hbase:meta row=28e9de42dab24f1a07f10c9f5a1c3fae, regionState=CLOSING, regionLocation=0a737713f6e3,36817,1734035782356 2024-12-12T20:38:06,717 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=51 updating hbase:meta row=68b62c0066b785f26c8f5639ed838724, regionState=CLOSING, regionLocation=0a737713f6e3,42407,1734035781742 2024-12-12T20:38:06,718 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T20:38:06,718 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=53, ppid=52, state=RUNNABLE; CloseRegionProcedure 28e9de42dab24f1a07f10c9f5a1c3fae, server=0a737713f6e3,36817,1734035782356}] 2024-12-12T20:38:06,727 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T20:38:06,728 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=51, state=RUNNABLE; CloseRegionProcedure 68b62c0066b785f26c8f5639ed838724, server=0a737713f6e3,42407,1734035781742}] 2024-12-12T20:38:06,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-12T20:38:06,882 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:38:06,886 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(124): Close 28e9de42dab24f1a07f10c9f5a1c3fae 2024-12-12T20:38:06,887 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T20:38:06,887 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1681): Closing 28e9de42dab24f1a07f10c9f5a1c3fae, disabling compactions & flushes 
2024-12-12T20:38:06,887 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae. 2024-12-12T20:38:06,887 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae. 2024-12-12T20:38:06,888 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae. after waiting 0 ms 2024-12-12T20:38:06,888 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae. 2024-12-12T20:38:06,895 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:38:06,902 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(124): Close 68b62c0066b785f26c8f5639ed838724 2024-12-12T20:38:06,902 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T20:38:06,903 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1681): Closing 68b62c0066b785f26c8f5639ed838724, disabling compactions & flushes 2024-12-12T20:38:06,903 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724. 2024-12-12T20:38:06,903 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724. 2024-12-12T20:38:06,903 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724. after waiting 0 ms 2024-12-12T20:38:06,903 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724. 
2024-12-12T20:38:06,959 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/68b62c0066b785f26c8f5639ed838724/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-12T20:38:06,971 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:38:06,972 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1922): Closed testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724. 2024-12-12T20:38:06,972 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1635): Region close journal for 68b62c0066b785f26c8f5639ed838724: 2024-12-12T20:38:06,982 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(170): Closed 68b62c0066b785f26c8f5639ed838724 2024-12-12T20:38:06,988 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=51 updating hbase:meta row=68b62c0066b785f26c8f5639ed838724, regionState=CLOSED 2024-12-12T20:38:06,992 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/28e9de42dab24f1a07f10c9f5a1c3fae/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-12T20:38:07,000 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:38:07,000 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1922): Closed testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae. 
2024-12-12T20:38:07,000 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1635): Region close journal for 28e9de42dab24f1a07f10c9f5a1c3fae: 2024-12-12T20:38:07,012 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(170): Closed 28e9de42dab24f1a07f10c9f5a1c3fae 2024-12-12T20:38:07,014 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=52 updating hbase:meta row=28e9de42dab24f1a07f10c9f5a1c3fae, regionState=CLOSED 2024-12-12T20:38:07,018 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=51 2024-12-12T20:38:07,019 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=51, state=SUCCESS; CloseRegionProcedure 68b62c0066b785f26c8f5639ed838724, server=0a737713f6e3,42407,1734035781742 in 273 msec 2024-12-12T20:38:07,040 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, ppid=50, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=68b62c0066b785f26c8f5639ed838724, UNASSIGN in 321 msec 2024-12-12T20:38:07,043 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=53, resume processing ppid=52 2024-12-12T20:38:07,044 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, ppid=52, state=SUCCESS; CloseRegionProcedure 28e9de42dab24f1a07f10c9f5a1c3fae, server=0a737713f6e3,36817,1734035782356 in 308 msec 2024-12-12T20:38:07,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-12T20:38:07,065 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=50 2024-12-12T20:38:07,065 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=50, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=28e9de42dab24f1a07f10c9f5a1c3fae, UNASSIGN in 346 msec 2024-12-12T20:38:07,079 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-12T20:38:07,079 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; CloseTableRegionsProcedure table=testExportWithResetTtl in 385 msec 2024-12-12T20:38:07,083 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035887083"}]},"ts":"1734035887083"} 2024-12-12T20:38:07,108 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-12T20:38:07,314 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-12-12T20:38:07,324 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; DisableTableProcedure table=testExportWithResetTtl in 906 msec 2024-12-12T20:38:07,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-12T20:38:07,549 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testExportWithResetTtl, procId: 49 completed 2024-12-12T20:38:07,550 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] 
master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-12-12T20:38:07,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testExportWithResetTtl 2024-12-12T20:38:07,551 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=55, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-12T20:38:07,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(259): Removing permissions of removed table testExportWithResetTtl 2024-12-12T20:38:07,552 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=55, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-12T20:38:07,554 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46859 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-12-12T20:38:07,556 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/68b62c0066b785f26c8f5639ed838724 2024-12-12T20:38:07,556 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/28e9de42dab24f1a07f10c9f5a1c3fae 2024-12-12T20:38:07,558 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/68b62c0066b785f26c8f5639ed838724/cf, FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/68b62c0066b785f26c8f5639ed838724/recovered.edits] 2024-12-12T20:38:07,558 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/28e9de42dab24f1a07f10c9f5a1c3fae/cf, FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/28e9de42dab24f1a07f10c9f5a1c3fae/recovered.edits] 2024-12-12T20:38:07,565 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/68b62c0066b785f26c8f5639ed838724/cf/f594e9df5a994858950c9ec54e9f8308 to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testExportWithResetTtl/68b62c0066b785f26c8f5639ed838724/cf/f594e9df5a994858950c9ec54e9f8308 2024-12-12T20:38:07,565 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/28e9de42dab24f1a07f10c9f5a1c3fae/cf/411ff41b23014525a27a018df367261f to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testExportWithResetTtl/28e9de42dab24f1a07f10c9f5a1c3fae/cf/411ff41b23014525a27a018df367261f 2024-12-12T20:38:07,569 DEBUG [HFileArchiver-12 
{}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/28e9de42dab24f1a07f10c9f5a1c3fae/recovered.edits/8.seqid to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testExportWithResetTtl/28e9de42dab24f1a07f10c9f5a1c3fae/recovered.edits/8.seqid 2024-12-12T20:38:07,570 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/28e9de42dab24f1a07f10c9f5a1c3fae 2024-12-12T20:38:07,575 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/68b62c0066b785f26c8f5639ed838724/recovered.edits/8.seqid to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testExportWithResetTtl/68b62c0066b785f26c8f5639ed838724/recovered.edits/8.seqid 2024-12-12T20:38:07,576 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportWithResetTtl/68b62c0066b785f26c8f5639ed838724 2024-12-12T20:38:07,576 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-12-12T20:38:07,578 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=55, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-12T20:38:07,581 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-12-12T20:38:08,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-12T20:38:08,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-12T20:38:08,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-12T20:38:08,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-12T20:38:08,007 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-12T20:38:08,007 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-12T20:38:08,007 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-12T20:38:08,008 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-12T20:38:08,009 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'testExportWithResetTtl' descriptor. 2024-12-12T20:38:08,010 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=55, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-12T20:38:08,011 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'testExportWithResetTtl' from region states. 2024-12-12T20:38:08,011 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734035888011"}]},"ts":"9223372036854775807"} 2024-12-12T20:38:08,011 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734035888011"}]},"ts":"9223372036854775807"} 2024-12-12T20:38:08,016 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-12T20:38:08,016 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 68b62c0066b785f26c8f5639ed838724, NAME => 'testExportWithResetTtl,,1734035844934.68b62c0066b785f26c8f5639ed838724.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 28e9de42dab24f1a07f10c9f5a1c3fae, NAME => 'testExportWithResetTtl,1,1734035844934.28e9de42dab24f1a07f10c9f5a1c3fae.', STARTKEY => '1', ENDKEY => ''}] 2024-12-12T20:38:08,016 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'testExportWithResetTtl' as deleted. 
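The DisableTableProcedure / DeleteTableProcedure activity above is driven by ordinary Admin calls on the client side. A minimal, hypothetical sketch using the standard HBase 2.x client API (configuration details and error handling omitted):

    // Sketch only: client-side calls behind the disable/delete sequence in the log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void dropTable(String name) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf(name);
          admin.disableTable(table);  // master runs DisableTableProcedure; regions are unassigned
          admin.deleteTable(table);   // master runs DeleteTableProcedure; region dirs are archived
        }
      }
    }

disableTable blocks until the procedure finishes, which is why the log shows "Operation: DISABLE ... completed" before the delete request is issued.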
2024-12-12T20:38:08,016 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734035888016"}]},"ts":"9223372036854775807"} 2024-12-12T20:38:08,019 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table testExportWithResetTtl state from META 2024-12-12T20:38:08,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-12T20:38:08,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-12T20:38:08,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:38:08,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:38:08,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-12T20:38:08,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:38:08,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-12T20:38:08,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:38:08,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-12T20:38:08,031 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T20:38:08,031 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T20:38:08,032 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 
2024-12-12T20:38:08,034 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T20:38:08,036 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=55, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-12T20:38:08,043 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; DeleteTableProcedure table=testExportWithResetTtl in 486 msec 2024-12-12T20:38:08,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-12T20:38:08,124 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testExportWithResetTtl, procId: 55 completed 2024-12-12T20:38:08,125 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithResetTtl 2024-12-12T20:38:08,125 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-12-12T20:38:08,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-12-12T20:38:08,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-12T20:38:08,135 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035888135"}]},"ts":"1734035888135"} 2024-12-12T20:38:08,141 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-12T20:38:08,154 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-12-12T20:38:08,159 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-12-12T20:38:08,163 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=1cf4c430205ee121a94b18015bb08a6a, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=d08da60ed6cea1345519d42919fe2917, UNASSIGN}] 2024-12-12T20:38:08,164 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=d08da60ed6cea1345519d42919fe2917, UNASSIGN 2024-12-12T20:38:08,164 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=1cf4c430205ee121a94b18015bb08a6a, UNASSIGN 
2024-12-12T20:38:08,165 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=59 updating hbase:meta row=d08da60ed6cea1345519d42919fe2917, regionState=CLOSING, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:38:08,167 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=58 updating hbase:meta row=1cf4c430205ee121a94b18015bb08a6a, regionState=CLOSING, regionLocation=0a737713f6e3,36817,1734035782356 2024-12-12T20:38:08,168 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T20:38:08,168 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; CloseRegionProcedure d08da60ed6cea1345519d42919fe2917, server=0a737713f6e3,46859,1734035782031}] 2024-12-12T20:38:08,169 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T20:38:08,170 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=61, ppid=58, state=RUNNABLE; CloseRegionProcedure 1cf4c430205ee121a94b18015bb08a6a, server=0a737713f6e3,36817,1734035782356}] 2024-12-12T20:38:08,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-12T20:38:08,322 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:38:08,324 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:38:08,325 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(124): Close 1cf4c430205ee121a94b18015bb08a6a 2024-12-12T20:38:08,325 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T20:38:08,326 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1681): Closing 1cf4c430205ee121a94b18015bb08a6a, disabling compactions & flushes 2024-12-12T20:38:08,326 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a. 2024-12-12T20:38:08,326 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a. 2024-12-12T20:38:08,326 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a. after waiting 0 ms 2024-12-12T20:38:08,326 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a. 
2024-12-12T20:38:08,335 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(124): Close d08da60ed6cea1345519d42919fe2917 2024-12-12T20:38:08,335 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T20:38:08,336 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1681): Closing d08da60ed6cea1345519d42919fe2917, disabling compactions & flushes 2024-12-12T20:38:08,336 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917. 2024-12-12T20:38:08,336 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917. 2024-12-12T20:38:08,336 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917. after waiting 0 ms 2024-12-12T20:38:08,336 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917. 2024-12-12T20:38:08,399 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/1cf4c430205ee121a94b18015bb08a6a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T20:38:08,401 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:38:08,401 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a. 
2024-12-12T20:38:08,402 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1635): Region close journal for 1cf4c430205ee121a94b18015bb08a6a: 2024-12-12T20:38:08,412 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(170): Closed 1cf4c430205ee121a94b18015bb08a6a 2024-12-12T20:38:08,412 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=58 updating hbase:meta row=1cf4c430205ee121a94b18015bb08a6a, regionState=CLOSED 2024-12-12T20:38:08,420 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=61, resume processing ppid=58 2024-12-12T20:38:08,420 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, ppid=58, state=SUCCESS; CloseRegionProcedure 1cf4c430205ee121a94b18015bb08a6a, server=0a737713f6e3,36817,1734035782356 in 245 msec 2024-12-12T20:38:08,421 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/d08da60ed6cea1345519d42919fe2917/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T20:38:08,431 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:38:08,431 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917. 2024-12-12T20:38:08,431 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1635): Region close journal for d08da60ed6cea1345519d42919fe2917: 2024-12-12T20:38:08,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-12T20:38:08,441 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(170): Closed d08da60ed6cea1345519d42919fe2917 2024-12-12T20:38:08,441 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=1cf4c430205ee121a94b18015bb08a6a, UNASSIGN in 258 msec 2024-12-12T20:38:08,444 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=59 updating hbase:meta row=d08da60ed6cea1345519d42919fe2917, regionState=CLOSED 2024-12-12T20:38:08,448 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-12-12T20:38:08,449 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; CloseRegionProcedure d08da60ed6cea1345519d42919fe2917, server=0a737713f6e3,46859,1734035782031 in 278 msec 2024-12-12T20:38:08,451 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=59, resume processing ppid=57 2024-12-12T20:38:08,451 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, ppid=57, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=d08da60ed6cea1345519d42919fe2917, UNASSIGN in 286 msec 2024-12-12T20:38:08,455 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): 
Finished subprocedure pid=57, resume processing ppid=56 2024-12-12T20:38:08,455 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, ppid=56, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 294 msec 2024-12-12T20:38:08,460 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035888460"}]},"ts":"1734035888460"} 2024-12-12T20:38:08,462 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-12T20:38:08,512 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-12-12T20:38:08,531 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithResetTtl in 395 msec 2024-12-12T20:38:08,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-12T20:38:08,758 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl, procId: 56 completed 2024-12-12T20:38:08,761 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-12-12T20:38:08,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-12T20:38:08,771 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-12T20:38:08,772 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-12T20:38:08,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithResetTtl 2024-12-12T20:38:08,786 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46859 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-12-12T20:38:08,800 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/1cf4c430205ee121a94b18015bb08a6a 2024-12-12T20:38:08,805 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/d08da60ed6cea1345519d42919fe2917 2024-12-12T20:38:08,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-12T20:38:08,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase 
Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-12T20:38:08,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-12T20:38:08,808 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/1cf4c430205ee121a94b18015bb08a6a/cf, FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/1cf4c430205ee121a94b18015bb08a6a/recovered.edits] 2024-12-12T20:38:08,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-12T20:38:08,819 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-12T20:38:08,819 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-12T20:38:08,820 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/d08da60ed6cea1345519d42919fe2917/cf, FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/d08da60ed6cea1345519d42919fe2917/recovered.edits] 2024-12-12T20:38:08,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-12T20:38:08,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:38:08,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-12T20:38:08,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:38:08,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=62 2024-12-12T20:38:08,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:38:08,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:38:08,827 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data null 2024-12-12T20:38:08,827 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data null 2024-12-12T20:38:08,827 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-12T20:38:08,827 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-12T20:38:08,835 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/1cf4c430205ee121a94b18015bb08a6a/cf/2bc1b8f3b73d4ad685c47fb3bb44c2f7 to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportWithResetTtl/1cf4c430205ee121a94b18015bb08a6a/cf/2bc1b8f3b73d4ad685c47fb3bb44c2f7 2024-12-12T20:38:08,835 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/d08da60ed6cea1345519d42919fe2917/cf/0ddf1a2af1eb4acbbbc8272bf17e9cde to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportWithResetTtl/d08da60ed6cea1345519d42919fe2917/cf/0ddf1a2af1eb4acbbbc8272bf17e9cde 2024-12-12T20:38:08,847 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/d08da60ed6cea1345519d42919fe2917/recovered.edits/9.seqid to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportWithResetTtl/d08da60ed6cea1345519d42919fe2917/recovered.edits/9.seqid 2024-12-12T20:38:08,851 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/d08da60ed6cea1345519d42919fe2917 2024-12-12T20:38:08,855 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/1cf4c430205ee121a94b18015bb08a6a/recovered.edits/9.seqid to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportWithResetTtl/1cf4c430205ee121a94b18015bb08a6a/recovered.edits/9.seqid 2024-12-12T20:38:08,857 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithResetTtl/1cf4c430205ee121a94b18015bb08a6a 2024-12-12T20:38:08,857 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-12-12T20:38:08,866 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, 
state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-12T20:38:08,886 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-12-12T20:38:08,902 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-12-12T20:38:08,906 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-12T20:38:08,906 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithResetTtl' from region states. 2024-12-12T20:38:08,906 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734035888906"}]},"ts":"9223372036854775807"} 2024-12-12T20:38:08,906 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734035888906"}]},"ts":"9223372036854775807"} 2024-12-12T20:38:08,913 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-12T20:38:08,913 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 1cf4c430205ee121a94b18015bb08a6a, NAME => 'testtb-testExportWithResetTtl,,1734035840210.1cf4c430205ee121a94b18015bb08a6a.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => d08da60ed6cea1345519d42919fe2917, NAME => 'testtb-testExportWithResetTtl,1,1734035840210.d08da60ed6cea1345519d42919fe2917.', STARTKEY => '1', ENDKEY => ''}] 2024-12-12T20:38:08,913 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithResetTtl' as deleted. 
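With the regions archived and the table marked deleted in hbase:meta (the DeleteTableProcedure steps above), the remaining client-side cleanup visible just below is dropping the table and its three snapshots. A minimal sketch of those Admin calls, assuming an Admin handle obtained as in the earlier sketch and using the table and snapshot names exactly as they appear in the log:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

final class DropExportTestTable {
  // 'admin' is assumed to come from Connection.getAdmin(), as in the earlier sketch.
  static void dropTableAndSnapshots(Admin admin) throws IOException {
    TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
    if (admin.tableExists(table)) {
      if (!admin.isTableDisabled(table)) {
        admin.disableTable(table);   // a table must be disabled before it can be deleted
      }
      admin.deleteTable(table);      // drives a DeleteTableProcedure like pid=62 above
    }
    // Snapshot names as they appear in the log.
    admin.deleteSnapshot("emptySnaptb0-testExportWithResetTtl");
    admin.deleteSnapshot("snaptb-testExportWithResetTtl");
    admin.deleteSnapshot("snaptb0-testExportWithResetTtl");
  }
}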
2024-12-12T20:38:08,913 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734035888913"}]},"ts":"9223372036854775807"} 2024-12-12T20:38:08,918 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithResetTtl state from META 2024-12-12T20:38:08,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=62 2024-12-12T20:38:08,937 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-12T20:38:08,941 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithResetTtl in 177 msec 2024-12-12T20:38:09,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=62 2024-12-12T20:38:09,135 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl, procId: 62 completed 2024-12-12T20:38:09,165 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" 2024-12-12T20:38:09,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-12-12T20:38:09,170 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" 2024-12-12T20:38:09,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb-testExportWithResetTtl 2024-12-12T20:38:09,177 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithResetTtl" 2024-12-12T20:38:09,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-12-12T20:38:09,214 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=775 (was 760) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35569 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1165987477_22 at /127.0.0.1:36208 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1165987477_22 at /127.0.0.1:52442 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1165987477_22 at /127.0.0.1:51416 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: IPC Client (874193583) connection to localhost/127.0.0.1:35569 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (874193583) connection to localhost/127.0.0.1:37615 from jenkins.hfs.1 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2027 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 57731) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=801 (was 804), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1858 (was 1482) - SystemLoadAverage LEAK? 
-, ProcessCount=17 (was 17), AvailableMemoryMB=2608 (was 3347) 2024-12-12T20:38:09,214 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=775 is superior to 500 2024-12-12T20:38:09,240 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=775, OpenFileDescriptor=803, MaxFileDescriptor=1048576, SystemLoadAverage=1858, ProcessCount=17, AvailableMemoryMB=2607 2024-12-12T20:38:09,240 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=775 is superior to 500 2024-12-12T20:38:09,247 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T20:38:09,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemState 2024-12-12T20:38:09,267 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T20:38:09,267 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:38:09,268 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 63 2024-12-12T20:38:09,274 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T20:38:09,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T20:38:09,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741961_1137 (size=407) 2024-12-12T20:38:09,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741961_1137 (size=407) 2024-12-12T20:38:09,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T20:38:09,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741961_1137 (size=407) 2024-12-12T20:38:09,388 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 110a0e8d5f29dd7882db7ce25039469f, NAME => 'testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES 
=> {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:38:09,398 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 266c34ac55fbc6640efd27a3e41ba601, NAME => 'testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:38:09,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741962_1138 (size=68) 2024-12-12T20:38:09,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741962_1138 (size=68) 2024-12-12T20:38:09,491 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:38:09,491 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1681): Closing 110a0e8d5f29dd7882db7ce25039469f, disabling compactions & flushes 2024-12-12T20:38:09,492 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f. 2024-12-12T20:38:09,492 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f. 2024-12-12T20:38:09,492 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f. after waiting 0 ms 2024-12-12T20:38:09,492 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f. 2024-12-12T20:38:09,492 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f. 
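The CreateTableProcedure above materializes a descriptor with a single 'cf' family and one split point ('1'), which is why two regions (STARTKEY ''..'1' and '1'..'') are initialized by the RegionOpenAndInit pool. A rough sketch of how such a pre-split table could be created through the public API; the builder calls and split key are assumptions matching the descriptor printed in the log, not the test's own helper code:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

final class CreateExportTestTable {
  static void create(Admin admin) throws IOException {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testExportFileSystemState"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)                  // VERSIONS => '1' in the logged descriptor
            .build())
        .build();
    // One split key yields the two regions seen above: (''..'1') and ('1'..'').
    byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
    admin.createTable(desc, splitKeys);         // drives a CreateTableProcedure like pid=63 above
  }
}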
2024-12-12T20:38:09,492 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1635): Region close journal for 110a0e8d5f29dd7882db7ce25039469f: 2024-12-12T20:38:09,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741962_1138 (size=68) 2024-12-12T20:38:09,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741963_1139 (size=68) 2024-12-12T20:38:09,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741963_1139 (size=68) 2024-12-12T20:38:09,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741963_1139 (size=68) 2024-12-12T20:38:09,547 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:38:09,547 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1681): Closing 266c34ac55fbc6640efd27a3e41ba601, disabling compactions & flushes 2024-12-12T20:38:09,548 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601. 2024-12-12T20:38:09,548 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601. 2024-12-12T20:38:09,548 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601. after waiting 0 ms 2024-12-12T20:38:09,548 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601. 2024-12-12T20:38:09,548 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601. 
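Between the two test cases, the ResourceChecker block further up dumps every "Potentially hanging thread" with its stack because the live thread count (775) exceeds the checker's 500-thread threshold. That check is HBase test infrastructure; purely as an illustration of the idea (and not the ResourceChecker implementation), a JVM-level thread census can be taken with the standard management API:

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

final class ThreadCensus {
  static void report(int threshold) {
    ThreadMXBean mx = ManagementFactory.getThreadMXBean();
    int live = mx.getThreadCount();
    System.out.println("live threads: " + live);
    if (live > threshold) {
      // Dump a stack for each thread, roughly the shape of the log output above.
      for (ThreadInfo info : mx.dumpAllThreads(false, false)) {
        System.out.println("Potentially hanging thread: " + info.getThreadName());
        for (StackTraceElement frame : info.getStackTrace()) {
          System.out.println("    " + frame);
        }
      }
    }
  }
}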
2024-12-12T20:38:09,548 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1635): Region close journal for 266c34ac55fbc6640efd27a3e41ba601: 2024-12-12T20:38:09,559 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T20:38:09,560 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1734035889559"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734035889559"}]},"ts":"1734035889559"} 2024-12-12T20:38:09,560 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1734035889559"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734035889559"}]},"ts":"1734035889559"} 2024-12-12T20:38:09,569 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-12T20:38:09,581 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T20:38:09,581 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035889581"}]},"ts":"1734035889581"} 2024-12-12T20:38:09,586 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-12-12T20:38:09,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T20:38:09,625 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(202): Hosts are {0a737713f6e3=0} racks are {/default-rack=0} 2024-12-12T20:38:09,629 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-12T20:38:09,629 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-12T20:38:09,629 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-12T20:38:09,629 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-12T20:38:09,629 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-12T20:38:09,629 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-12T20:38:09,629 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T20:38:09,630 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=110a0e8d5f29dd7882db7ce25039469f, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=266c34ac55fbc6640efd27a3e41ba601, ASSIGN}] 2024-12-12T20:38:09,634 INFO [PEWorker-5 {}] 
procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=266c34ac55fbc6640efd27a3e41ba601, ASSIGN 2024-12-12T20:38:09,634 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=110a0e8d5f29dd7882db7ce25039469f, ASSIGN 2024-12-12T20:38:09,635 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=110a0e8d5f29dd7882db7ce25039469f, ASSIGN; state=OFFLINE, location=0a737713f6e3,46859,1734035782031; forceNewPlan=false, retain=false 2024-12-12T20:38:09,635 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=266c34ac55fbc6640efd27a3e41ba601, ASSIGN; state=OFFLINE, location=0a737713f6e3,36817,1734035782356; forceNewPlan=false, retain=false 2024-12-12T20:38:09,786 INFO [0a737713f6e3:43067 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-12T20:38:09,787 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=266c34ac55fbc6640efd27a3e41ba601, regionState=OPENING, regionLocation=0a737713f6e3,36817,1734035782356 2024-12-12T20:38:09,787 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=64 updating hbase:meta row=110a0e8d5f29dd7882db7ce25039469f, regionState=OPENING, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:38:09,795 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; OpenRegionProcedure 266c34ac55fbc6640efd27a3e41ba601, server=0a737713f6e3,36817,1734035782356}] 2024-12-12T20:38:09,802 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=67, ppid=64, state=RUNNABLE; OpenRegionProcedure 110a0e8d5f29dd7882db7ce25039469f, server=0a737713f6e3,46859,1734035782031}] 2024-12-12T20:38:09,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T20:38:09,955 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:38:09,961 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601. 2024-12-12T20:38:09,962 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7285): Opening region: {ENCODED => 266c34ac55fbc6640efd27a3e41ba601, NAME => 'testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601.', STARTKEY => '1', ENDKEY => ''} 2024-12-12T20:38:09,962 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601. 
service=AccessControlService 2024-12-12T20:38:09,963 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T20:38:09,963 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 266c34ac55fbc6640efd27a3e41ba601 2024-12-12T20:38:09,963 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:38:09,963 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7327): checking encryption for 266c34ac55fbc6640efd27a3e41ba601 2024-12-12T20:38:09,963 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7330): checking classloading for 266c34ac55fbc6640efd27a3e41ba601 2024-12-12T20:38:09,968 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:38:09,969 INFO [StoreOpener-266c34ac55fbc6640efd27a3e41ba601-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 266c34ac55fbc6640efd27a3e41ba601 2024-12-12T20:38:09,973 INFO [StoreOpener-266c34ac55fbc6640efd27a3e41ba601-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 266c34ac55fbc6640efd27a3e41ba601 columnFamilyName cf 2024-12-12T20:38:09,973 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f. 
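The assignment procedures above place the two new regions on different region servers (ports 36817 and 46859), and the OpenRegionProcedure handlers then open them, loading the AccessController coprocessor and the 'cf' store. As a small sketch of how a client could confirm the outcome once the create-table call returns, reusing the connection and table-name assumptions from the earlier sketches:

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

final class VerifyRegionsOnline {
  static void verify(Connection conn, Admin admin) throws IOException {
    TableName table = TableName.valueOf("testtb-testExportFileSystemState");
    System.out.println("enabled: " + admin.isTableEnabled(table));
    try (RegionLocator locator = conn.getRegionLocator(table)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // Prints each region name (which embeds the start key) and its hosting
        // region server, mirroring the OPENING/OPENED transitions recorded in hbase:meta.
        System.out.println(loc.getRegion().getRegionNameAsString()
            + " on " + loc.getServerName());
      }
    }
  }
}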
2024-12-12T20:38:09,973 DEBUG [StoreOpener-266c34ac55fbc6640efd27a3e41ba601-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:38:09,973 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7285): Opening region: {ENCODED => 110a0e8d5f29dd7882db7ce25039469f, NAME => 'testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f.', STARTKEY => '', ENDKEY => '1'} 2024-12-12T20:38:09,973 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f. service=AccessControlService 2024-12-12T20:38:09,974 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T20:38:09,974 INFO [StoreOpener-266c34ac55fbc6640efd27a3e41ba601-1 {}] regionserver.HStore(327): Store=266c34ac55fbc6640efd27a3e41ba601/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:38:09,974 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 110a0e8d5f29dd7882db7ce25039469f 2024-12-12T20:38:09,974 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:38:09,974 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7327): checking encryption for 110a0e8d5f29dd7882db7ce25039469f 2024-12-12T20:38:09,974 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7330): checking classloading for 110a0e8d5f29dd7882db7ce25039469f 2024-12-12T20:38:09,975 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/266c34ac55fbc6640efd27a3e41ba601 2024-12-12T20:38:09,975 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/266c34ac55fbc6640efd27a3e41ba601 2024-12-12T20:38:09,978 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1085): writing seq id for 266c34ac55fbc6640efd27a3e41ba601 2024-12-12T20:38:09,984 INFO [StoreOpener-110a0e8d5f29dd7882db7ce25039469f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 110a0e8d5f29dd7882db7ce25039469f 2024-12-12T20:38:09,987 INFO [StoreOpener-110a0e8d5f29dd7882db7ce25039469f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 110a0e8d5f29dd7882db7ce25039469f columnFamilyName cf 2024-12-12T20:38:09,987 DEBUG [StoreOpener-110a0e8d5f29dd7882db7ce25039469f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:38:09,988 INFO [StoreOpener-110a0e8d5f29dd7882db7ce25039469f-1 {}] regionserver.HStore(327): Store=110a0e8d5f29dd7882db7ce25039469f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:38:09,989 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/110a0e8d5f29dd7882db7ce25039469f 2024-12-12T20:38:09,990 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/110a0e8d5f29dd7882db7ce25039469f 2024-12-12T20:38:09,992 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/266c34ac55fbc6640efd27a3e41ba601/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:38:09,993 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1102): Opened 266c34ac55fbc6640efd27a3e41ba601; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71068364, jitterRate=0.059001147747039795}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:38:09,995 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1001): Region open journal for 266c34ac55fbc6640efd27a3e41ba601: 2024-12-12T20:38:09,996 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1085): writing seq id for 110a0e8d5f29dd7882db7ce25039469f 2024-12-12T20:38:10,000 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2601): Post open deploy tasks for 
testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601., pid=66, masterSystemTime=1734035889954 2024-12-12T20:38:10,004 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601. 2024-12-12T20:38:10,004 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601. 2024-12-12T20:38:10,005 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=266c34ac55fbc6640efd27a3e41ba601, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,36817,1734035782356 2024-12-12T20:38:10,014 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-12-12T20:38:10,014 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; OpenRegionProcedure 266c34ac55fbc6640efd27a3e41ba601, server=0a737713f6e3,36817,1734035782356 in 217 msec 2024-12-12T20:38:10,016 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/110a0e8d5f29dd7882db7ce25039469f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:38:10,017 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=63, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=266c34ac55fbc6640efd27a3e41ba601, ASSIGN in 385 msec 2024-12-12T20:38:10,017 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1102): Opened 110a0e8d5f29dd7882db7ce25039469f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74285035, jitterRate=0.10693328082561493}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:38:10,017 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1001): Region open journal for 110a0e8d5f29dd7882db7ce25039469f: 2024-12-12T20:38:10,018 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f., pid=67, masterSystemTime=1734035889968 2024-12-12T20:38:10,020 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f. 2024-12-12T20:38:10,021 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f. 
2024-12-12T20:38:10,021 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=64 updating hbase:meta row=110a0e8d5f29dd7882db7ce25039469f, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:38:10,030 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=67, resume processing ppid=64 2024-12-12T20:38:10,030 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, ppid=64, state=SUCCESS; OpenRegionProcedure 110a0e8d5f29dd7882db7ce25039469f, server=0a737713f6e3,46859,1734035782031 in 221 msec 2024-12-12T20:38:10,030 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43067 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=OPEN, location=0a737713f6e3,46859,1734035782031, table=testtb-testExportFileSystemState, region=110a0e8d5f29dd7882db7ce25039469f. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-12T20:38:10,050 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-12-12T20:38:10,050 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=110a0e8d5f29dd7882db7ce25039469f, ASSIGN in 400 msec 2024-12-12T20:38:10,051 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T20:38:10,051 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035890051"}]},"ts":"1734035890051"} 2024-12-12T20:38:10,055 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-12-12T20:38:10,066 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T20:38:10,066 DEBUG [PEWorker-5 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-12-12T20:38:10,070 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46859 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-12T20:38:10,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:38:10,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:38:10,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:38:10,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:38:10,087 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-12T20:38:10,091 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-12T20:38:10,091 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-12T20:38:10,092 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-12T20:38:10,094 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemState in 841 msec 2024-12-12T20:38:10,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T20:38:10,409 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState, procId: 63 completed 2024-12-12T20:38:10,409 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportFileSystemState get assigned. Timeout = 60000ms 2024-12-12T20:38:10,410 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:38:10,415 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemState assigned to meta. Checking AM states. 2024-12-12T20:38:10,416 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:38:10,416 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemState assigned. 2024-12-12T20:38:10,421 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-12T20:38:10,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734035890421 (current time:1734035890421). 
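The entries above show CreateTableProcedure pid=63 creating testtb-testExportFileSystemState with a single 'cf' family and two regions split at row '1', the test waiting for assignment, and the client then requesting the FLUSH snapshot emptySnaptb0-testExportFileSystemState. A minimal client-side sketch of that same sequence, assuming the standard HBase 2.x Admin API; connection settings, class name, and the split key layout here are illustrative reconstructions, not taken from the test source:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableAndEmptySnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportFileSystemState");
          TableDescriptor desc = TableDescriptorBuilder.newBuilder(tn)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))  // single family 'cf', as in the log
              .build();
          byte[][] splitKeys = { Bytes.toBytes("1") };       // two regions: ['', '1') and ['1', '')
          admin.createTable(desc, splitKeys);                // returns once the CreateTableProcedure completes
          // FLUSH-type snapshot of the still-empty table, matching emptySnaptb0-... in the log
          admin.snapshot("emptySnaptb0-testExportFileSystemState", tn);
        }
      }
    }

Because the table is still empty at this point, the per-region snapshot work below only records region-info and adds references for zero hfiles.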
2024-12-12T20:38:10,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T20:38:10,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-12T20:38:10,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-12T20:38:10,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x473ad85b to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@10517995 2024-12-12T20:38:10,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25c7ca3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:38:10,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:38:10,467 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33112, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:38:10,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x473ad85b to 127.0.0.1:52655 2024-12-12T20:38:10,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:38:10,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x56870df6 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6939e0de 2024-12-12T20:38:10,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@861303e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:38:10,508 DEBUG [hconnection-0x40f6aece-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:38:10,513 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33122, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:38:10,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x56870df6 to 127.0.0.1:52655 2024-12-12T20:38:10,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:38:10,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 
2024-12-12T20:38:10,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-12T20:38:10,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-12T20:38:10,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-12T20:38:10,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-12T20:38:10,527 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T20:38:10,529 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T20:38:10,542 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T20:38:10,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741964_1140 (size=170) 2024-12-12T20:38:10,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741964_1140 (size=170) 2024-12-12T20:38:10,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741964_1140 (size=170) 2024-12-12T20:38:10,605 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T20:38:10,605 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 110a0e8d5f29dd7882db7ce25039469f}, {pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 266c34ac55fbc6640efd27a3e41ba601}] 2024-12-12T20:38:10,612 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 266c34ac55fbc6640efd27a3e41ba601 2024-12-12T20:38:10,612 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE; 
SnapshotRegionProcedure 110a0e8d5f29dd7882db7ce25039469f 2024-12-12T20:38:10,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-12T20:38:10,660 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0002_000001 (auth:SIMPLE) from 127.0.0.1:40852 2024-12-12T20:38:10,694 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_3/usercache/jenkins/appcache/application_1734035798001_0002/container_1734035798001_0002_01_000001/launch_container.sh] 2024-12-12T20:38:10,694 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_3/usercache/jenkins/appcache/application_1734035798001_0002/container_1734035798001_0002_01_000001/container_tokens] 2024-12-12T20:38:10,694 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_3/usercache/jenkins/appcache/application_1734035798001_0002/container_1734035798001_0002_01_000001/sysfs] 2024-12-12T20:38:10,765 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:38:10,765 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:38:10,766 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36817 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-12-12T20:38:10,766 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601. 2024-12-12T20:38:10,767 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for 266c34ac55fbc6640efd27a3e41ba601: 2024-12-12T20:38:10,767 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601. for emptySnaptb0-testExportFileSystemState completed. 2024-12-12T20:38:10,767 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-12T20:38:10,767 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46859 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-12-12T20:38:10,767 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:38:10,767 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T20:38:10,768 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f. 2024-12-12T20:38:10,768 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2538): Flush status journal for 110a0e8d5f29dd7882db7ce25039469f: 2024-12-12T20:38:10,768 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f. for emptySnaptb0-testExportFileSystemState completed. 2024-12-12T20:38:10,768 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-12T20:38:10,768 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:38:10,768 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T20:38:10,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741965_1141 (size=71) 2024-12-12T20:38:10,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741965_1141 (size=71) 2024-12-12T20:38:10,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741965_1141 (size=71) 2024-12-12T20:38:10,821 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601. 
2024-12-12T20:38:10,822 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-12T20:38:10,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-12-12T20:38:10,822 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 266c34ac55fbc6640efd27a3e41ba601 2024-12-12T20:38:10,823 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 266c34ac55fbc6640efd27a3e41ba601 2024-12-12T20:38:10,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741966_1142 (size=71) 2024-12-12T20:38:10,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741966_1142 (size=71) 2024-12-12T20:38:10,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741966_1142 (size=71) 2024-12-12T20:38:10,829 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=68, state=SUCCESS; SnapshotRegionProcedure 266c34ac55fbc6640efd27a3e41ba601 in 218 msec 2024-12-12T20:38:10,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-12T20:38:11,127 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-12T20:38:11,127 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-12T20:38:11,128 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-12T20:38:11,129 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-12T20:38:11,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-12T20:38:11,229 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f. 
2024-12-12T20:38:11,229 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-12-12T20:38:11,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=69 2024-12-12T20:38:11,229 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 110a0e8d5f29dd7882db7ce25039469f 2024-12-12T20:38:11,229 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 110a0e8d5f29dd7882db7ce25039469f 2024-12-12T20:38:11,236 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=69, resume processing ppid=68 2024-12-12T20:38:11,236 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T20:38:11,236 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; SnapshotRegionProcedure 110a0e8d5f29dd7882db7ce25039469f in 625 msec 2024-12-12T20:38:11,239 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T20:38:11,241 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T20:38:11,241 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-12-12T20:38:11,243 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-12-12T20:38:11,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741967_1143 (size=552) 2024-12-12T20:38:11,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741967_1143 (size=552) 2024-12-12T20:38:11,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741967_1143 (size=552) 2024-12-12T20:38:11,290 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T20:38:11,308 INFO [PEWorker-1 {}] 
procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T20:38:11,309 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-12-12T20:38:11,318 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T20:38:11,318 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-12T20:38:11,325 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 797 msec 2024-12-12T20:38:11,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-12T20:38:11,638 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState, procId: 68 completed 2024-12-12T20:38:11,660 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46859 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T20:38:11,672 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36817 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T20:38:11,705 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemState 2024-12-12T20:38:11,705 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f. 2024-12-12T20:38:11,706 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:38:11,784 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-12T20:38:11,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734035891784 (current time:1734035891784). 
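After the empty snapshot (pid=68) completes, the test writes a small amount of data to both regions with the WAL disabled (hence the "Data may be lost in the event of a crash" warnings) and requests a second FLUSH snapshot, snaptb0-testExportFileSystemState. A rough client-side equivalent, again assuming the standard HBase 2.x client API; the row key, qualifier, and value are placeholders, not the test's actual data:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteDataAndSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("testtb-testExportFileSystemState");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             Table table = conn.getTable(tn)) {
          Put put = new Put(Bytes.toBytes("row-placeholder"));   // placeholder row key
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          put.setDurability(Durability.SKIP_WAL);                // matches "writing data ... with WAL disabled"
          table.put(put);
          // FLUSH-type snapshot: memstores are flushed to HFiles first, then referenced by the manifest
          admin.snapshot("snaptb0-testExportFileSystemState", tn);
        }
      }
    }

This is why the second snapshot's region procedures below flush the memstores (e.g. the 132 B and 3.13 KB flushes) before adding hfile references, whereas the first snapshot had nothing to flush.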
2024-12-12T20:38:11,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T20:38:11,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-12T20:38:11,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-12T20:38:11,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0988f878 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7c7db759 2024-12-12T20:38:11,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5538ff76, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:38:11,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:38:11,832 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33130, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:38:11,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0988f878 to 127.0.0.1:52655 2024-12-12T20:38:11,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:38:11,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5ddea060 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4c0813fd 2024-12-12T20:38:11,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ed99645, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:38:11,890 DEBUG [hconnection-0x13c5554c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:38:11,896 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33140, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:38:11,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5ddea060 to 127.0.0.1:52655 2024-12-12T20:38:11,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:38:11,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 
2024-12-12T20:38:11,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-12T20:38:11,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-12T20:38:11,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-12T20:38:11,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-12T20:38:11,930 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T20:38:11,932 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T20:38:11,938 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T20:38:11,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741968_1144 (size=165) 2024-12-12T20:38:11,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741968_1144 (size=165) 2024-12-12T20:38:11,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741968_1144 (size=165) 2024-12-12T20:38:11,988 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T20:38:11,988 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 110a0e8d5f29dd7882db7ce25039469f}, {pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 266c34ac55fbc6640efd27a3e41ba601}] 2024-12-12T20:38:11,995 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 110a0e8d5f29dd7882db7ce25039469f 2024-12-12T20:38:11,996 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 
266c34ac55fbc6640efd27a3e41ba601 2024-12-12T20:38:12,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-12T20:38:12,151 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:38:12,151 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:38:12,152 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46859 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-12-12T20:38:12,152 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36817 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-12-12T20:38:12,152 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f. 2024-12-12T20:38:12,153 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 110a0e8d5f29dd7882db7ce25039469f 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-12T20:38:12,153 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601. 2024-12-12T20:38:12,153 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2837): Flushing 266c34ac55fbc6640efd27a3e41ba601 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-12T20:38:12,208 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/110a0e8d5f29dd7882db7ce25039469f/.tmp/cf/7459f9c769d7454fbbb28302e56368d1 is 71, key is 037b12c84dc1a8bf0371f47d9bc6790b/cf:q/1734035891660/Put/seqid=0 2024-12-12T20:38:12,231 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/266c34ac55fbc6640efd27a3e41ba601/.tmp/cf/e542ad1b37324656a8e0b0e1fc7ac6ac is 71, key is 180f89b33341e5242a540039ba4218b7/cf:q/1734035891672/Put/seqid=0 2024-12-12T20:38:12,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-12T20:38:12,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741969_1145 (size=5216) 2024-12-12T20:38:12,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741969_1145 (size=5216) 2024-12-12T20:38:12,275 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 
{event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/110a0e8d5f29dd7882db7ce25039469f/.tmp/cf/7459f9c769d7454fbbb28302e56368d1 2024-12-12T20:38:12,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741969_1145 (size=5216) 2024-12-12T20:38:12,288 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/110a0e8d5f29dd7882db7ce25039469f/.tmp/cf/7459f9c769d7454fbbb28302e56368d1 as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/110a0e8d5f29dd7882db7ce25039469f/cf/7459f9c769d7454fbbb28302e56368d1 2024-12-12T20:38:12,310 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/110a0e8d5f29dd7882db7ce25039469f/cf/7459f9c769d7454fbbb28302e56368d1, entries=2, sequenceid=6, filesize=5.1 K 2024-12-12T20:38:12,315 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 110a0e8d5f29dd7882db7ce25039469f in 163ms, sequenceid=6, compaction requested=false 2024-12-12T20:38:12,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-12T20:38:12,317 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 110a0e8d5f29dd7882db7ce25039469f: 2024-12-12T20:38:12,317 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f. for snaptb0-testExportFileSystemState completed. 2024-12-12T20:38:12,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-12T20:38:12,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:38:12,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/110a0e8d5f29dd7882db7ce25039469f/cf/7459f9c769d7454fbbb28302e56368d1] hfiles 2024-12-12T20:38:12,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/110a0e8d5f29dd7882db7ce25039469f/cf/7459f9c769d7454fbbb28302e56368d1 for snapshot=snaptb0-testExportFileSystemState 2024-12-12T20:38:12,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741970_1146 (size=8394) 2024-12-12T20:38:12,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741970_1146 (size=8394) 2024-12-12T20:38:12,333 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/266c34ac55fbc6640efd27a3e41ba601/.tmp/cf/e542ad1b37324656a8e0b0e1fc7ac6ac 2024-12-12T20:38:12,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741970_1146 (size=8394) 2024-12-12T20:38:12,383 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/266c34ac55fbc6640efd27a3e41ba601/.tmp/cf/e542ad1b37324656a8e0b0e1fc7ac6ac as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/266c34ac55fbc6640efd27a3e41ba601/cf/e542ad1b37324656a8e0b0e1fc7ac6ac 2024-12-12T20:38:12,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741971_1147 (size=110) 2024-12-12T20:38:12,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741971_1147 (size=110) 2024-12-12T20:38:12,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741971_1147 (size=110) 2024-12-12T20:38:12,407 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f. 
2024-12-12T20:38:12,408 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-12T20:38:12,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-12-12T20:38:12,411 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 110a0e8d5f29dd7882db7ce25039469f 2024-12-12T20:38:12,412 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 110a0e8d5f29dd7882db7ce25039469f 2024-12-12T20:38:12,426 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; SnapshotRegionProcedure 110a0e8d5f29dd7882db7ce25039469f in 433 msec 2024-12-12T20:38:12,438 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/266c34ac55fbc6640efd27a3e41ba601/cf/e542ad1b37324656a8e0b0e1fc7ac6ac, entries=48, sequenceid=6, filesize=8.2 K 2024-12-12T20:38:12,444 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3040): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 266c34ac55fbc6640efd27a3e41ba601 in 290ms, sequenceid=6, compaction requested=false 2024-12-12T20:38:12,444 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2538): Flush status journal for 266c34ac55fbc6640efd27a3e41ba601: 2024-12-12T20:38:12,444 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601. for snaptb0-testExportFileSystemState completed. 2024-12-12T20:38:12,445 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-12T20:38:12,445 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:38:12,445 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/266c34ac55fbc6640efd27a3e41ba601/cf/e542ad1b37324656a8e0b0e1fc7ac6ac] hfiles 2024-12-12T20:38:12,445 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/266c34ac55fbc6640efd27a3e41ba601/cf/e542ad1b37324656a8e0b0e1fc7ac6ac for snapshot=snaptb0-testExportFileSystemState 2024-12-12T20:38:12,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-12T20:38:12,545 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T20:38:12,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741972_1148 (size=110) 2024-12-12T20:38:12,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741972_1148 (size=110) 2024-12-12T20:38:12,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741972_1148 (size=110) 2024-12-12T20:38:12,586 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601. 
2024-12-12T20:38:12,586 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-12-12T20:38:12,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=73 2024-12-12T20:38:12,586 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 266c34ac55fbc6640efd27a3e41ba601 2024-12-12T20:38:12,587 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 266c34ac55fbc6640efd27a3e41ba601 2024-12-12T20:38:12,613 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=73, resume processing ppid=71 2024-12-12T20:38:12,613 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, ppid=71, state=SUCCESS; SnapshotRegionProcedure 266c34ac55fbc6640efd27a3e41ba601 in 605 msec 2024-12-12T20:38:12,613 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T20:38:12,641 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T20:38:12,642 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T20:38:12,642 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-12-12T20:38:12,646 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-12T20:38:12,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741973_1149 (size=630) 2024-12-12T20:38:12,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741973_1149 (size=630) 2024-12-12T20:38:12,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741973_1149 (size=630) 2024-12-12T20:38:12,805 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T20:38:12,846 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): 
pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T20:38:12,847 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-12T20:38:12,862 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T20:38:12,862 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-12T20:38:12,878 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 938 msec 2024-12-12T20:38:13,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-12T20:38:13,046 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState, procId: 71 completed 2024-12-12T20:38:13,046 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035893046 2024-12-12T20:38:13,046 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:37615, tgtDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035893046, rawTgtDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035893046, srcFsUri=hdfs://localhost:37615, srcDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:38:13,105 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:37615, inputRoot=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:38:13,105 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1165987477_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035893046, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035893046/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-12T20:38:13,112 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 
2024-12-12T20:38:13,132 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035893046/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-12T20:38:13,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741974_1150 (size=165) 2024-12-12T20:38:13,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741974_1150 (size=165) 2024-12-12T20:38:13,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741974_1150 (size=165) 2024-12-12T20:38:13,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741975_1151 (size=630) 2024-12-12T20:38:13,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741975_1151 (size=630) 2024-12-12T20:38:13,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741975_1151 (size=630) 2024-12-12T20:38:13,240 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-12T20:38:13,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-12T20:38:13,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-12T20:38:13,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-12T20:38:14,538 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop-7159955265248538219.jar 2024-12-12T20:38:14,539 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-12T20:38:14,539 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-12T20:38:14,644 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For 
class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop-4402363826045515862.jar 2024-12-12T20:38:14,645 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-12T20:38:14,645 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-12T20:38:14,646 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-12T20:38:14,646 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-12T20:38:14,646 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-12T20:38:14,647 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-12T20:38:14,647 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-12T20:38:14,647 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-12T20:38:14,648 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-12T20:38:14,648 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-12T20:38:14,648 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 
2024-12-12T20:38:14,649 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-12T20:38:14,649 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-12T20:38:14,649 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-12T20:38:14,650 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-12T20:38:14,650 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-12T20:38:14,650 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-12T20:38:14,651 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-12T20:38:14,652 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:38:14,652 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:38:14,653 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:38:14,656 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:38:14,656 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-12-12T20:38:14,657 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:38:14,657 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:38:14,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741976_1152 (size=127628) 2024-12-12T20:38:14,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741976_1152 (size=127628) 2024-12-12T20:38:14,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741976_1152 (size=127628) 2024-12-12T20:38:14,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741977_1153 (size=2172101) 2024-12-12T20:38:14,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741977_1153 (size=2172101) 2024-12-12T20:38:14,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741977_1153 (size=2172101) 2024-12-12T20:38:14,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741978_1154 (size=213228) 2024-12-12T20:38:14,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741978_1154 (size=213228) 2024-12-12T20:38:14,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741978_1154 (size=213228) 2024-12-12T20:38:14,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741979_1155 (size=1877034) 2024-12-12T20:38:14,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741979_1155 (size=1877034) 2024-12-12T20:38:14,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741979_1155 (size=1877034) 2024-12-12T20:38:14,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741980_1156 (size=533455) 2024-12-12T20:38:14,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741980_1156 (size=533455) 2024-12-12T20:38:14,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741980_1156 (size=533455) 2024-12-12T20:38:14,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741981_1157 (size=7280644) 
2024-12-12T20:38:14,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741981_1157 (size=7280644) 2024-12-12T20:38:14,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741981_1157 (size=7280644) 2024-12-12T20:38:15,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741982_1158 (size=4188619) 2024-12-12T20:38:15,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741982_1158 (size=4188619) 2024-12-12T20:38:15,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741982_1158 (size=4188619) 2024-12-12T20:38:15,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741983_1159 (size=20406) 2024-12-12T20:38:15,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741983_1159 (size=20406) 2024-12-12T20:38:15,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741983_1159 (size=20406) 2024-12-12T20:38:15,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741984_1160 (size=75495) 2024-12-12T20:38:15,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741984_1160 (size=75495) 2024-12-12T20:38:15,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741984_1160 (size=75495) 2024-12-12T20:38:15,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741985_1161 (size=45609) 2024-12-12T20:38:15,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741985_1161 (size=45609) 2024-12-12T20:38:15,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741985_1161 (size=45609) 2024-12-12T20:38:15,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741986_1162 (size=110084) 2024-12-12T20:38:15,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741986_1162 (size=110084) 2024-12-12T20:38:15,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741986_1162 (size=110084) 2024-12-12T20:38:15,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741987_1163 (size=1323991) 2024-12-12T20:38:15,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741987_1163 (size=1323991) 2024-12-12T20:38:15,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741987_1163 
(size=1323991) 2024-12-12T20:38:15,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741988_1164 (size=23076) 2024-12-12T20:38:15,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741988_1164 (size=23076) 2024-12-12T20:38:15,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741988_1164 (size=23076) 2024-12-12T20:38:15,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741989_1165 (size=126803) 2024-12-12T20:38:15,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741989_1165 (size=126803) 2024-12-12T20:38:15,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741989_1165 (size=126803) 2024-12-12T20:38:15,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741990_1166 (size=322274) 2024-12-12T20:38:15,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741990_1166 (size=322274) 2024-12-12T20:38:15,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741990_1166 (size=322274) 2024-12-12T20:38:15,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741991_1167 (size=6350857) 2024-12-12T20:38:15,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741991_1167 (size=6350857) 2024-12-12T20:38:15,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741991_1167 (size=6350857) 2024-12-12T20:38:15,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741992_1168 (size=1832290) 2024-12-12T20:38:15,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741992_1168 (size=1832290) 2024-12-12T20:38:15,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741992_1168 (size=1832290) 2024-12-12T20:38:15,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741993_1169 (size=30081) 2024-12-12T20:38:15,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741993_1169 (size=30081) 2024-12-12T20:38:15,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741993_1169 (size=30081) 2024-12-12T20:38:15,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741994_1170 (size=53616) 2024-12-12T20:38:15,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to 
blk_1073741994_1170 (size=53616) 2024-12-12T20:38:15,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741994_1170 (size=53616) 2024-12-12T20:38:15,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741995_1171 (size=29229) 2024-12-12T20:38:15,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741995_1171 (size=29229) 2024-12-12T20:38:15,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741995_1171 (size=29229) 2024-12-12T20:38:15,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741996_1172 (size=169089) 2024-12-12T20:38:15,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741996_1172 (size=169089) 2024-12-12T20:38:15,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741996_1172 (size=169089) 2024-12-12T20:38:15,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741997_1173 (size=451756) 2024-12-12T20:38:15,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741997_1173 (size=451756) 2024-12-12T20:38:15,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741997_1173 (size=451756) 2024-12-12T20:38:15,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741998_1174 (size=5175431) 2024-12-12T20:38:15,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741998_1174 (size=5175431) 2024-12-12T20:38:15,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741998_1174 (size=5175431) 2024-12-12T20:38:15,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741999_1175 (size=136454) 2024-12-12T20:38:15,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741999_1175 (size=136454) 2024-12-12T20:38:15,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741999_1175 (size=136454) 2024-12-12T20:38:15,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742000_1176 (size=907861) 2024-12-12T20:38:15,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742000_1176 (size=907861) 2024-12-12T20:38:15,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742000_1176 (size=907861) 2024-12-12T20:38:15,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added 
to blk_1073742001_1177 (size=3317408) 2024-12-12T20:38:15,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742001_1177 (size=3317408) 2024-12-12T20:38:15,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742001_1177 (size=3317408) 2024-12-12T20:38:15,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742002_1178 (size=503880) 2024-12-12T20:38:15,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742002_1178 (size=503880) 2024-12-12T20:38:15,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742002_1178 (size=503880) 2024-12-12T20:38:15,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742003_1179 (size=4695811) 2024-12-12T20:38:15,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742003_1179 (size=4695811) 2024-12-12T20:38:15,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742003_1179 (size=4695811) 2024-12-12T20:38:15,843 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-12T20:38:15,847 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-12-12T20:38:15,850 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-12T20:38:15,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742004_1180 (size=344) 2024-12-12T20:38:15,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742004_1180 (size=344) 2024-12-12T20:38:15,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742004_1180 (size=344) 2024-12-12T20:38:16,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742005_1181 (size=15) 2024-12-12T20:38:16,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742005_1181 (size=15) 2024-12-12T20:38:16,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742005_1181 (size=15) 2024-12-12T20:38:16,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742006_1182 (size=304889) 2024-12-12T20:38:16,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742006_1182 (size=304889) 2024-12-12T20:38:16,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742006_1182 (size=304889) 2024-12-12T20:38:16,341 WARN [SchedulerEventDispatcher:Event Processor {}] 
capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T20:38:16,341 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T20:38:16,680 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0003_000001 (auth:SIMPLE) from 127.0.0.1:40860 2024-12-12T20:38:19,299 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T20:38:27,838 DEBUG [master/0a737713f6e3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 266c34ac55fbc6640efd27a3e41ba601 changed from -1.0 to 0.0, refreshing cache 2024-12-12T20:38:27,838 DEBUG [master/0a737713f6e3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 110a0e8d5f29dd7882db7ce25039469f changed from -1.0 to 0.0, refreshing cache 2024-12-12T20:38:28,572 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0003_000001 (auth:SIMPLE) from 127.0.0.1:58706 2024-12-12T20:38:29,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742007_1183 (size=350563) 2024-12-12T20:38:29,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742007_1183 (size=350563) 2024-12-12T20:38:29,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742007_1183 (size=350563) 2024-12-12T20:38:31,197 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0003_000001 (auth:SIMPLE) from 127.0.0.1:53896 2024-12-12T20:38:39,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742008_1184 (size=8394) 2024-12-12T20:38:39,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742008_1184 (size=8394) 2024-12-12T20:38:39,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742008_1184 (size=8394) 2024-12-12T20:38:39,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742009_1185 (size=5216) 2024-12-12T20:38:39,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742009_1185 (size=5216) 2024-12-12T20:38:39,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742009_1185 (size=5216) 2024-12-12T20:38:40,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742010_1186 (size=17426) 2024-12-12T20:38:40,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742010_1186 (size=17426) 2024-12-12T20:38:40,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742010_1186 (size=17426) 2024-12-12T20:38:40,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742011_1187 (size=465) 2024-12-12T20:38:40,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742011_1187 (size=465) 2024-12-12T20:38:40,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742011_1187 (size=465) 2024-12-12T20:38:40,299 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_1/usercache/jenkins/appcache/application_1734035798001_0003/container_1734035798001_0003_01_000002/launch_container.sh] 2024-12-12T20:38:40,299 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_1/usercache/jenkins/appcache/application_1734035798001_0003/container_1734035798001_0003_01_000002/container_tokens] 2024-12-12T20:38:40,299 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_1/usercache/jenkins/appcache/application_1734035798001_0003/container_1734035798001_0003_01_000002/sysfs] 2024-12-12T20:38:40,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742012_1188 (size=17426) 2024-12-12T20:38:40,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742012_1188 (size=17426) 2024-12-12T20:38:40,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742012_1188 (size=17426) 2024-12-12T20:38:40,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742013_1189 (size=350563) 2024-12-12T20:38:40,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742013_1189 (size=350563) 2024-12-12T20:38:40,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742013_1189 (size=350563) 2024-12-12T20:38:40,475 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0003_000001 (auth:SIMPLE) from 127.0.0.1:40018 
2024-12-12T20:38:41,915 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-12T20:38:41,939 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-12T20:38:41,999 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemState 2024-12-12T20:38:41,999 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-12T20:38:42,001 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-12T20:38:42,001 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1165987477_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-12T20:38:42,007 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-12T20:38:42,008 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-12T20:38:42,008 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1165987477_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035893046/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035893046/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-12T20:38:42,015 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035893046/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-12T20:38:42,015 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035893046/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-12T20:38:42,044 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemState 2024-12-12T20:38:42,046 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-12-12T20:38:42,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=74, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemState 2024-12-12T20:38:42,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-12T20:38:42,054 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035922054"}]},"ts":"1734035922054"} 2024-12-12T20:38:42,057 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-12-12T20:38:42,111 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-12-12T20:38:42,112 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-12-12T20:38:42,113 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=110a0e8d5f29dd7882db7ce25039469f, UNASSIGN}, {pid=77, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=266c34ac55fbc6640efd27a3e41ba601, UNASSIGN}] 2024-12-12T20:38:42,114 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=77, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=266c34ac55fbc6640efd27a3e41ba601, UNASSIGN 2024-12-12T20:38:42,114 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=76, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=110a0e8d5f29dd7882db7ce25039469f, UNASSIGN 2024-12-12T20:38:42,115 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=77 updating hbase:meta row=266c34ac55fbc6640efd27a3e41ba601, regionState=CLOSING, regionLocation=0a737713f6e3,36817,1734035782356 2024-12-12T20:38:42,115 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=76 updating hbase:meta row=110a0e8d5f29dd7882db7ce25039469f, regionState=CLOSING, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:38:42,116 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T20:38:42,116 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; CloseRegionProcedure 266c34ac55fbc6640efd27a3e41ba601, server=0a737713f6e3,36817,1734035782356}] 2024-12-12T20:38:42,117 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T20:38:42,118 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=79, ppid=76, state=RUNNABLE; CloseRegionProcedure 110a0e8d5f29dd7882db7ce25039469f, server=0a737713f6e3,46859,1734035782031}] 2024-12-12T20:38:42,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-12T20:38:42,270 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:38:42,270 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:38:42,271 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] 
handler.UnassignRegionHandler(124): Close 266c34ac55fbc6640efd27a3e41ba601 2024-12-12T20:38:42,271 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(124): Close 110a0e8d5f29dd7882db7ce25039469f 2024-12-12T20:38:42,271 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T20:38:42,271 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T20:38:42,271 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1681): Closing 110a0e8d5f29dd7882db7ce25039469f, disabling compactions & flushes 2024-12-12T20:38:42,271 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f. 2024-12-12T20:38:42,271 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f. 2024-12-12T20:38:42,271 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f. after waiting 0 ms 2024-12-12T20:38:42,271 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f. 2024-12-12T20:38:42,272 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1681): Closing 266c34ac55fbc6640efd27a3e41ba601, disabling compactions & flushes 2024-12-12T20:38:42,272 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601. 2024-12-12T20:38:42,272 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601. 2024-12-12T20:38:42,272 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601. after waiting 0 ms 2024-12-12T20:38:42,272 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601. 
2024-12-12T20:38:42,304 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/266c34ac55fbc6640efd27a3e41ba601/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T20:38:42,305 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:38:42,305 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601. 2024-12-12T20:38:42,305 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1635): Region close journal for 266c34ac55fbc6640efd27a3e41ba601: 2024-12-12T20:38:42,311 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/110a0e8d5f29dd7882db7ce25039469f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T20:38:42,316 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(170): Closed 266c34ac55fbc6640efd27a3e41ba601 2024-12-12T20:38:42,317 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=77 updating hbase:meta row=266c34ac55fbc6640efd27a3e41ba601, regionState=CLOSED 2024-12-12T20:38:42,318 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:38:42,318 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f. 
2024-12-12T20:38:42,318 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1635): Region close journal for 110a0e8d5f29dd7882db7ce25039469f: 2024-12-12T20:38:42,321 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=76 updating hbase:meta row=110a0e8d5f29dd7882db7ce25039469f, regionState=CLOSED 2024-12-12T20:38:42,321 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(170): Closed 110a0e8d5f29dd7882db7ce25039469f 2024-12-12T20:38:42,324 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-12T20:38:42,324 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; CloseRegionProcedure 266c34ac55fbc6640efd27a3e41ba601, server=0a737713f6e3,36817,1734035782356 in 204 msec 2024-12-12T20:38:42,325 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=79, resume processing ppid=76 2024-12-12T20:38:42,326 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, ppid=76, state=SUCCESS; CloseRegionProcedure 110a0e8d5f29dd7882db7ce25039469f, server=0a737713f6e3,46859,1734035782031 in 206 msec 2024-12-12T20:38:42,326 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, ppid=75, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=266c34ac55fbc6640efd27a3e41ba601, UNASSIGN in 211 msec 2024-12-12T20:38:42,328 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-12T20:38:42,328 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=110a0e8d5f29dd7882db7ce25039469f, UNASSIGN in 213 msec 2024-12-12T20:38:42,332 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=75, resume processing ppid=74 2024-12-12T20:38:42,332 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, ppid=74, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 217 msec 2024-12-12T20:38:42,337 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035922337"}]},"ts":"1734035922337"} 2024-12-12T20:38:42,339 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-12-12T20:38:42,353 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-12-12T20:38:42,355 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemState in 307 msec 2024-12-12T20:38:42,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-12T20:38:42,357 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState, procId: 74 completed 2024-12-12T20:38:42,358 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 
2024-12-12T20:38:42,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=80, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-12T20:38:42,360 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=80, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-12T20:38:42,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemState 2024-12-12T20:38:42,361 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=80, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-12T20:38:42,363 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46859 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-12-12T20:38:42,365 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/110a0e8d5f29dd7882db7ce25039469f 2024-12-12T20:38:42,367 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/266c34ac55fbc6640efd27a3e41ba601 2024-12-12T20:38:42,368 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/110a0e8d5f29dd7882db7ce25039469f/cf, FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/110a0e8d5f29dd7882db7ce25039469f/recovered.edits] 2024-12-12T20:38:42,370 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/266c34ac55fbc6640efd27a3e41ba601/cf, FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/266c34ac55fbc6640efd27a3e41ba601/recovered.edits] 2024-12-12T20:38:42,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-12T20:38:42,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-12T20:38:42,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-12T20:38:42,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, 
quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-12T20:38:42,383 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-12T20:38:42,383 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-12T20:38:42,383 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-12T20:38:42,383 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-12T20:38:42,384 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/266c34ac55fbc6640efd27a3e41ba601/cf/e542ad1b37324656a8e0b0e1fc7ac6ac to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportFileSystemState/266c34ac55fbc6640efd27a3e41ba601/cf/e542ad1b37324656a8e0b0e1fc7ac6ac 2024-12-12T20:38:42,384 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/110a0e8d5f29dd7882db7ce25039469f/cf/7459f9c769d7454fbbb28302e56368d1 to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportFileSystemState/110a0e8d5f29dd7882db7ce25039469f/cf/7459f9c769d7454fbbb28302e56368d1 2024-12-12T20:38:42,389 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/266c34ac55fbc6640efd27a3e41ba601/recovered.edits/9.seqid to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportFileSystemState/266c34ac55fbc6640efd27a3e41ba601/recovered.edits/9.seqid 2024-12-12T20:38:42,389 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/110a0e8d5f29dd7882db7ce25039469f/recovered.edits/9.seqid to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportFileSystemState/110a0e8d5f29dd7882db7ce25039469f/recovered.edits/9.seqid 2024-12-12T20:38:42,390 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/110a0e8d5f29dd7882db7ce25039469f 2024-12-12T20:38:42,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-12T20:38:42,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, 
quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:38:42,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-12T20:38:42,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:38:42,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-12T20:38:42,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:38:42,391 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemState/266c34ac55fbc6640efd27a3e41ba601 2024-12-12T20:38:42,391 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-12-12T20:38:42,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=80 2024-12-12T20:38:42,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-12T20:38:42,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:38:42,394 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=80, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-12T20:38:42,404 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-12-12T20:38:42,407 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemState' descriptor. 2024-12-12T20:38:42,408 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=80, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-12T20:38:42,408 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemState' from region states. 
2024-12-12T20:38:42,408 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734035922408"}]},"ts":"9223372036854775807"} 2024-12-12T20:38:42,409 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734035922408"}]},"ts":"9223372036854775807"} 2024-12-12T20:38:42,411 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-12T20:38:42,411 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 110a0e8d5f29dd7882db7ce25039469f, NAME => 'testtb-testExportFileSystemState,,1734035889246.110a0e8d5f29dd7882db7ce25039469f.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 266c34ac55fbc6640efd27a3e41ba601, NAME => 'testtb-testExportFileSystemState,1,1734035889246.266c34ac55fbc6640efd27a3e41ba601.', STARTKEY => '1', ENDKEY => ''}] 2024-12-12T20:38:42,412 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemState' as deleted. 2024-12-12T20:38:42,412 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734035922412"}]},"ts":"9223372036854775807"} 2024-12-12T20:38:42,419 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemState state from META 2024-12-12T20:38:42,432 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=80, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-12T20:38:42,433 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemState in 74 msec 2024-12-12T20:38:42,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=80 2024-12-12T20:38:42,493 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState, procId: 80 completed 2024-12-12T20:38:42,500 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" 2024-12-12T20:38:42,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-12-12T20:38:42,503 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" 2024-12-12T20:38:42,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemState 2024-12-12T20:38:42,523 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=784 (was 775) Potentially hanging thread: HFileArchiver-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2093578068_1 at /127.0.0.1:44146 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874193583) connection to localhost/127.0.0.1:36865 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2663 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/0a737713f6e3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874193583) connection to localhost/127.0.0.1:35771 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35771 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1165987477_22 at /127.0.0.1:35420 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1165987477_22 at /127.0.0.1:44176 [Waiting for 
operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1165987477_22 at /127.0.0.1:33464 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40403 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/0a737713f6e3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-16 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36865 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 60316) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=802 (was 803), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1844 (was 1858), ProcessCount=20 (was 17) - ProcessCount LEAK? -, AvailableMemoryMB=2742 (was 2607) - AvailableMemoryMB LEAK? - 2024-12-12T20:38:42,523 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=784 is superior to 500 2024-12-12T20:38:42,542 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=784, OpenFileDescriptor=802, MaxFileDescriptor=1048576, SystemLoadAverage=1844, ProcessCount=20, AvailableMemoryMB=2741 2024-12-12T20:38:42,542 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=784 is superior to 500 2024-12-12T20:38:42,544 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T20:38:42,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testConsecutiveExports 2024-12-12T20:38:42,545 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T20:38:42,545 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:38:42,545 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 81 2024-12-12T20:38:42,546 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): 
pid=81, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T20:38:42,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-12T20:38:42,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742014_1190 (size=404) 2024-12-12T20:38:42,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742014_1190 (size=404) 2024-12-12T20:38:42,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742014_1190 (size=404) 2024-12-12T20:38:42,572 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => df840831079a0010c945d68e2d3f2c51, NAME => 'testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:38:42,572 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 4c50796a514e2b2f0c40752ad11371fe, NAME => 'testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:38:42,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742015_1191 (size=65) 2024-12-12T20:38:42,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742015_1191 (size=65) 2024-12-12T20:38:42,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742015_1191 (size=65) 2024-12-12T20:38:42,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742016_1192 (size=65) 2024-12-12T20:38:42,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742016_1192 (size=65) 2024-12-12T20:38:42,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:36349 is added to blk_1073742016_1192 (size=65) 2024-12-12T20:38:42,604 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:38:42,604 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1681): Closing df840831079a0010c945d68e2d3f2c51, disabling compactions & flushes 2024-12-12T20:38:42,604 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51. 2024-12-12T20:38:42,604 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51. 2024-12-12T20:38:42,605 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51. after waiting 0 ms 2024-12-12T20:38:42,605 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51. 2024-12-12T20:38:42,605 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51. 2024-12-12T20:38:42,605 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1635): Region close journal for df840831079a0010c945d68e2d3f2c51: 2024-12-12T20:38:42,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-12T20:38:42,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-12T20:38:43,003 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:38:43,003 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1681): Closing 4c50796a514e2b2f0c40752ad11371fe, disabling compactions & flushes 2024-12-12T20:38:43,003 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe. 2024-12-12T20:38:43,003 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe. 2024-12-12T20:38:43,003 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe. 
after waiting 0 ms 2024-12-12T20:38:43,003 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe. 2024-12-12T20:38:43,003 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe. 2024-12-12T20:38:43,003 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1635): Region close journal for 4c50796a514e2b2f0c40752ad11371fe: 2024-12-12T20:38:43,004 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T20:38:43,005 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734035923004"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734035923004"}]},"ts":"1734035923004"} 2024-12-12T20:38:43,005 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734035923004"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734035923004"}]},"ts":"1734035923004"} 2024-12-12T20:38:43,009 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-12T20:38:43,018 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T20:38:43,019 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035923018"}]},"ts":"1734035923018"} 2024-12-12T20:38:43,024 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-12-12T20:38:43,044 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(202): Hosts are {0a737713f6e3=0} racks are {/default-rack=0} 2024-12-12T20:38:43,047 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-12T20:38:43,047 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-12T20:38:43,047 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-12T20:38:43,047 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-12T20:38:43,047 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-12T20:38:43,047 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-12T20:38:43,047 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T20:38:43,048 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=df840831079a0010c945d68e2d3f2c51, 
ASSIGN}, {pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=4c50796a514e2b2f0c40752ad11371fe, ASSIGN}] 2024-12-12T20:38:43,049 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=4c50796a514e2b2f0c40752ad11371fe, ASSIGN 2024-12-12T20:38:43,050 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=df840831079a0010c945d68e2d3f2c51, ASSIGN 2024-12-12T20:38:43,050 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=4c50796a514e2b2f0c40752ad11371fe, ASSIGN; state=OFFLINE, location=0a737713f6e3,36817,1734035782356; forceNewPlan=false, retain=false 2024-12-12T20:38:43,050 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=df840831079a0010c945d68e2d3f2c51, ASSIGN; state=OFFLINE, location=0a737713f6e3,46859,1734035782031; forceNewPlan=false, retain=false 2024-12-12T20:38:43,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-12T20:38:43,201 INFO [0a737713f6e3:43067 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-12T20:38:43,204 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=82 updating hbase:meta row=df840831079a0010c945d68e2d3f2c51, regionState=OPENING, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:38:43,205 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=4c50796a514e2b2f0c40752ad11371fe, regionState=OPENING, regionLocation=0a737713f6e3,36817,1734035782356 2024-12-12T20:38:43,216 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=82, state=RUNNABLE; OpenRegionProcedure df840831079a0010c945d68e2d3f2c51, server=0a737713f6e3,46859,1734035782031}] 2024-12-12T20:38:43,217 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=85, ppid=83, state=RUNNABLE; OpenRegionProcedure 4c50796a514e2b2f0c40752ad11371fe, server=0a737713f6e3,36817,1734035782356}] 2024-12-12T20:38:43,374 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:38:43,387 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:38:43,390 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] handler.AssignRegionHandler(135): Open testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51. 
2024-12-12T20:38:43,390 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7285): Opening region: {ENCODED => df840831079a0010c945d68e2d3f2c51, NAME => 'testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51.', STARTKEY => '', ENDKEY => '1'} 2024-12-12T20:38:43,396 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51. service=AccessControlService 2024-12-12T20:38:43,396 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T20:38:43,396 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports df840831079a0010c945d68e2d3f2c51 2024-12-12T20:38:43,396 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:38:43,397 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7327): checking encryption for df840831079a0010c945d68e2d3f2c51 2024-12-12T20:38:43,397 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7330): checking classloading for df840831079a0010c945d68e2d3f2c51 2024-12-12T20:38:43,411 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] handler.AssignRegionHandler(135): Open testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe. 2024-12-12T20:38:43,411 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7285): Opening region: {ENCODED => 4c50796a514e2b2f0c40752ad11371fe, NAME => 'testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe.', STARTKEY => '1', ENDKEY => ''} 2024-12-12T20:38:43,411 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe. service=AccessControlService 2024-12-12T20:38:43,412 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-12T20:38:43,412 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 4c50796a514e2b2f0c40752ad11371fe 2024-12-12T20:38:43,412 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:38:43,412 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7327): checking encryption for 4c50796a514e2b2f0c40752ad11371fe 2024-12-12T20:38:43,412 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7330): checking classloading for 4c50796a514e2b2f0c40752ad11371fe 2024-12-12T20:38:43,416 INFO [StoreOpener-df840831079a0010c945d68e2d3f2c51-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region df840831079a0010c945d68e2d3f2c51 2024-12-12T20:38:43,420 INFO [StoreOpener-4c50796a514e2b2f0c40752ad11371fe-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4c50796a514e2b2f0c40752ad11371fe 2024-12-12T20:38:43,428 INFO [StoreOpener-df840831079a0010c945d68e2d3f2c51-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region df840831079a0010c945d68e2d3f2c51 columnFamilyName cf 2024-12-12T20:38:43,428 DEBUG [StoreOpener-df840831079a0010c945d68e2d3f2c51-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:38:43,432 INFO [StoreOpener-df840831079a0010c945d68e2d3f2c51-1 {}] regionserver.HStore(327): Store=df840831079a0010c945d68e2d3f2c51/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:38:43,435 INFO [StoreOpener-4c50796a514e2b2f0c40752ad11371fe-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming 
window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4c50796a514e2b2f0c40752ad11371fe columnFamilyName cf 2024-12-12T20:38:43,436 DEBUG [StoreOpener-4c50796a514e2b2f0c40752ad11371fe-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:38:43,437 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/df840831079a0010c945d68e2d3f2c51 2024-12-12T20:38:43,440 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/df840831079a0010c945d68e2d3f2c51 2024-12-12T20:38:43,443 INFO [StoreOpener-4c50796a514e2b2f0c40752ad11371fe-1 {}] regionserver.HStore(327): Store=4c50796a514e2b2f0c40752ad11371fe/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:38:43,448 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/4c50796a514e2b2f0c40752ad11371fe 2024-12-12T20:38:43,452 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/4c50796a514e2b2f0c40752ad11371fe 2024-12-12T20:38:43,466 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1085): writing seq id for 4c50796a514e2b2f0c40752ad11371fe 2024-12-12T20:38:43,475 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1085): writing seq id for df840831079a0010c945d68e2d3f2c51 2024-12-12T20:38:43,492 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/4c50796a514e2b2f0c40752ad11371fe/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:38:43,493 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1102): Opened 4c50796a514e2b2f0c40752ad11371fe; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69230030, jitterRate=0.03160783648490906}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:38:43,494 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1001): Region 
open journal for 4c50796a514e2b2f0c40752ad11371fe: 2024-12-12T20:38:43,496 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe., pid=85, masterSystemTime=1734035923387 2024-12-12T20:38:43,500 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe. 2024-12-12T20:38:43,500 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] handler.AssignRegionHandler(164): Opened testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe. 2024-12-12T20:38:43,501 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=4c50796a514e2b2f0c40752ad11371fe, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,36817,1734035782356 2024-12-12T20:38:43,507 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/df840831079a0010c945d68e2d3f2c51/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:38:43,508 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1102): Opened df840831079a0010c945d68e2d3f2c51; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66306443, jitterRate=-0.01195700466632843}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:38:43,509 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1001): Region open journal for df840831079a0010c945d68e2d3f2c51: 2024-12-12T20:38:43,512 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51., pid=84, masterSystemTime=1734035923374 2024-12-12T20:38:43,526 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51. 2024-12-12T20:38:43,526 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] handler.AssignRegionHandler(164): Opened testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51. 
2024-12-12T20:38:43,547 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=82 updating hbase:meta row=df840831079a0010c945d68e2d3f2c51, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:38:43,548 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=85, resume processing ppid=83 2024-12-12T20:38:43,548 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, ppid=83, state=SUCCESS; OpenRegionProcedure 4c50796a514e2b2f0c40752ad11371fe, server=0a737713f6e3,36817,1734035782356 in 298 msec 2024-12-12T20:38:43,563 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, ppid=81, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=4c50796a514e2b2f0c40752ad11371fe, ASSIGN in 501 msec 2024-12-12T20:38:43,577 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=82 2024-12-12T20:38:43,577 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=82, state=SUCCESS; OpenRegionProcedure df840831079a0010c945d68e2d3f2c51, server=0a737713f6e3,46859,1734035782031 in 344 msec 2024-12-12T20:38:43,593 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-12T20:38:43,594 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=df840831079a0010c945d68e2d3f2c51, ASSIGN in 530 msec 2024-12-12T20:38:43,609 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T20:38:43,609 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035923609"}]},"ts":"1734035923609"} 2024-12-12T20:38:43,612 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-12-12T20:38:43,644 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T20:38:43,645 DEBUG [PEWorker-5 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-12-12T20:38:43,648 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46859 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-12T20:38:43,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-12T20:38:43,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:38:43,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:38:43,674 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:38:43,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:38:43,699 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-12T20:38:43,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-12T20:38:43,700 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-12T20:38:43,700 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-12T20:38:43,705 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-12T20:38:43,708 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; CreateTableProcedure table=testtb-testConsecutiveExports in 1.1570 sec 2024-12-12T20:38:43,976 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-12-12T20:38:44,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-12T20:38:44,671 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports, procId: 81 completed 2024-12-12T20:38:44,672 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testConsecutiveExports get assigned. Timeout = 60000ms 2024-12-12T20:38:44,672 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:38:44,688 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testConsecutiveExports assigned to meta. Checking AM states. 2024-12-12T20:38:44,689 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:38:44,689 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testConsecutiveExports assigned. 
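The pid=81 trace above covers the full CreateTableProcedure lifecycle for a table pre-split at '1' into the two regions df840831079a0010c945d68e2d3f2c51 (STARTKEY '', ENDKEY '1') and 4c50796a514e2b2f0c40752ad11371fe (STARTKEY '1', ENDKEY ''). For orientation, here is a minimal Java sketch of the kind of client call that produces such a trace, assuming the HBase 2.x Admin API; the class name, configuration handling and connection setup are illustrative assumptions, not the actual test code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreatePresplitTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testConsecutiveExports");
      // Single column family 'cf', matching the StoreOpener entries above.
      TableDescriptor td = TableDescriptorBuilder.newBuilder(tn)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .build();
      // One split point at '1' yields the two regions seen in the log.
      byte[][] splitKeys = { Bytes.toBytes("1") };
      admin.createTable(td, splitKeys);
    }
  }
}

admin.createTable(...) is synchronous, which lines up with the later "Operation: CREATE ... procId: 81 completed" entry and the subsequent wait for all regions of the table to be assigned.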
2024-12-12T20:38:44,719 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-12T20:38:44,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734035924719 (current time:1734035924719). 2024-12-12T20:38:44,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T20:38:44,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-12T20:38:44,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-12T20:38:44,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x641d2b7c to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@71d8abf4 2024-12-12T20:38:44,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c88118f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:38:44,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:38:44,755 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37720, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:38:44,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x641d2b7c to 127.0.0.1:52655 2024-12-12T20:38:44,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:38:44,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3810d84d to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@75ae2a49 2024-12-12T20:38:44,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6936591a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:38:44,803 DEBUG [hconnection-0x5d0ed653-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:38:44,805 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37732, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:38:44,814 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3810d84d to 127.0.0.1:52655 2024-12-12T20:38:44,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:38:44,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-12T20:38:44,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-12T20:38:44,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=86, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-12T20:38:44,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 86 2024-12-12T20:38:44,829 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T20:38:44,834 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T20:38:44,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-12T20:38:44,858 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T20:38:44,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742017_1193 (size=161) 2024-12-12T20:38:44,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742017_1193 (size=161) 2024-12-12T20:38:44,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742017_1193 (size=161) 2024-12-12T20:38:44,921 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T20:38:44,921 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 
df840831079a0010c945d68e2d3f2c51}, {pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 4c50796a514e2b2f0c40752ad11371fe}] 2024-12-12T20:38:44,927 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 4c50796a514e2b2f0c40752ad11371fe 2024-12-12T20:38:44,928 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure df840831079a0010c945d68e2d3f2c51 2024-12-12T20:38:44,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-12T20:38:45,079 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:38:45,079 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:38:45,080 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36817 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=88 2024-12-12T20:38:45,081 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46859 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=87 2024-12-12T20:38:45,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe. 2024-12-12T20:38:45,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51. 2024-12-12T20:38:45,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.HRegion(2538): Flush status journal for df840831079a0010c945d68e2d3f2c51: 2024-12-12T20:38:45,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 4c50796a514e2b2f0c40752ad11371fe: 2024-12-12T20:38:45,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51. for emptySnaptb0-testConsecutiveExports completed. 2024-12-12T20:38:45,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe. for emptySnaptb0-testConsecutiveExports completed. 2024-12-12T20:38:45,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe.' 
region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-12T20:38:45,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:38:45,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T20:38:45,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-12T20:38:45,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:38:45,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T20:38:45,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-12T20:38:45,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742018_1194 (size=68) 2024-12-12T20:38:45,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742018_1194 (size=68) 2024-12-12T20:38:45,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742018_1194 (size=68) 2024-12-12T20:38:45,159 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe. 
2024-12-12T20:38:45,160 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-12-12T20:38:45,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-12-12T20:38:45,160 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 4c50796a514e2b2f0c40752ad11371fe 2024-12-12T20:38:45,161 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 4c50796a514e2b2f0c40752ad11371fe 2024-12-12T20:38:45,165 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=86, state=SUCCESS; SnapshotRegionProcedure 4c50796a514e2b2f0c40752ad11371fe in 241 msec 2024-12-12T20:38:45,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742019_1195 (size=68) 2024-12-12T20:38:45,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742019_1195 (size=68) 2024-12-12T20:38:45,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742019_1195 (size=68) 2024-12-12T20:38:45,228 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51. 2024-12-12T20:38:45,228 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=87 2024-12-12T20:38:45,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=87 2024-12-12T20:38:45,235 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region df840831079a0010c945d68e2d3f2c51 2024-12-12T20:38:45,239 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure df840831079a0010c945d68e2d3f2c51 2024-12-12T20:38:45,259 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=87, resume processing ppid=86 2024-12-12T20:38:45,259 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T20:38:45,259 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, ppid=86, state=SUCCESS; SnapshotRegionProcedure df840831079a0010c945d68e2d3f2c51 in 327 msec 2024-12-12T20:38:45,262 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 
execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T20:38:45,271 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T20:38:45,271 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-12-12T20:38:45,274 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-12-12T20:38:45,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742020_1196 (size=543) 2024-12-12T20:38:45,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742020_1196 (size=543) 2024-12-12T20:38:45,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742020_1196 (size=543) 2024-12-12T20:38:45,423 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T20:38:45,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-12T20:38:45,457 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T20:38:45,461 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-12-12T20:38:45,470 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T20:38:45,470 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 86 2024-12-12T20:38:45,484 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 649 msec 2024-12-12T20:38:45,947 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-12T20:38:45,948 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports, procId: 86 completed 2024-12-12T20:38:45,978 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46859 {}] regionserver.HRegion(8254): writing data to region testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T20:38:46,004 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36817 {}] regionserver.HRegion(8254): writing data to region testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T20:38:46,024 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testConsecutiveExports 2024-12-12T20:38:46,024 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51. 2024-12-12T20:38:46,024 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:38:46,099 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-12T20:38:46,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734035926099 (current time:1734035926099). 
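The entries above show the test loading rows into both regions with the WAL switched off (hence the "Data may be lost in the event of a crash" warnings) and then asking the master for a second, FLUSH-type snapshot, snaptb0-testConsecutiveExports. A minimal sketch of the corresponding client-side calls, again assuming the HBase 2.x API; the row key and value below are invented for illustration, the log only shows cf:q cells being written.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class LoadAndSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("testtb-testConsecutiveExports");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      // Skip the WAL for this write, which is what triggers the
      // "writing data to region ... with WAL disabled" log entries.
      Put put = new Put(Bytes.toBytes("row-0001"));                    // illustrative row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);

      // A FLUSH-type snapshot makes each region flush its memstore first,
      // producing the hfiles that the snapshot manifest then references.
      admin.snapshot("snaptb0-testConsecutiveExports", tn, SnapshotType.FLUSH);
    }
  }
}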
2024-12-12T20:38:46,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T20:38:46,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-12T20:38:46,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-12T20:38:46,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3cbf4638 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1728b3b2 2024-12-12T20:38:46,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61446c9f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:38:46,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:38:46,152 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37746, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:38:46,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3cbf4638 to 127.0.0.1:52655 2024-12-12T20:38:46,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:38:46,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11ca7f4c to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@432dfbe4 2024-12-12T20:38:46,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14f578a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:38:46,218 DEBUG [hconnection-0x456140cc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:38:46,230 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37756, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:38:46,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11ca7f4c to 127.0.0.1:52655 2024-12-12T20:38:46,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:38:46,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-12T20:38:46,253 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-12T20:38:46,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-12T20:38:46,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 89 2024-12-12T20:38:46,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-12T20:38:46,268 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T20:38:46,272 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T20:38:46,293 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T20:38:46,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-12T20:38:46,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742021_1197 (size=156) 2024-12-12T20:38:46,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742021_1197 (size=156) 2024-12-12T20:38:46,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742021_1197 (size=156) 2024-12-12T20:38:46,386 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T20:38:46,386 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure df840831079a0010c945d68e2d3f2c51}, {pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 4c50796a514e2b2f0c40752ad11371fe}] 2024-12-12T20:38:46,389 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 4c50796a514e2b2f0c40752ad11371fe 2024-12-12T20:38:46,394 INFO [PEWorker-2 {}] 
procedure.MasterProcedureScheduler(786): Took xlock for pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure df840831079a0010c945d68e2d3f2c51 2024-12-12T20:38:46,543 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:38:46,544 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36817 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=91 2024-12-12T20:38:46,547 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe. 2024-12-12T20:38:46,548 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(2837): Flushing 4c50796a514e2b2f0c40752ad11371fe 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-12T20:38:46,549 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:38:46,549 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46859 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=90 2024-12-12T20:38:46,550 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51. 2024-12-12T20:38:46,550 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing df840831079a0010c945d68e2d3f2c51 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-12T20:38:46,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-12T20:38:46,608 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/df840831079a0010c945d68e2d3f2c51/.tmp/cf/41bc225f132d4b76a76c086f1d990a1b is 71, key is 036ceb62841eca3a26905f1933909fee/cf:q/1734035925978/Put/seqid=0 2024-12-12T20:38:46,632 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/4c50796a514e2b2f0c40752ad11371fe/.tmp/cf/78de66b6f6844a82916c57320669dce7 is 71, key is 10edafc9345736944afc70a91b1aed89/cf:q/1734035926004/Put/seqid=0 2024-12-12T20:38:46,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742023_1199 (size=8188) 2024-12-12T20:38:46,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742023_1199 (size=8188) 2024-12-12T20:38:46,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to 
blk_1073742022_1198 (size=5422) 2024-12-12T20:38:46,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742022_1198 (size=5422) 2024-12-12T20:38:46,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742022_1198 (size=5422) 2024-12-12T20:38:46,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742023_1199 (size=8188) 2024-12-12T20:38:46,715 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/4c50796a514e2b2f0c40752ad11371fe/.tmp/cf/78de66b6f6844a82916c57320669dce7 2024-12-12T20:38:46,718 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0003_000001 (auth:SIMPLE) from 127.0.0.1:59660 2024-12-12T20:38:46,745 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_0/usercache/jenkins/appcache/application_1734035798001_0003/container_1734035798001_0003_01_000001/launch_container.sh] 2024-12-12T20:38:46,745 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_0/usercache/jenkins/appcache/application_1734035798001_0003/container_1734035798001_0003_01_000001/container_tokens] 2024-12-12T20:38:46,745 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_0/usercache/jenkins/appcache/application_1734035798001_0003/container_1734035798001_0003_01_000001/sysfs] 2024-12-12T20:38:46,748 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/4c50796a514e2b2f0c40752ad11371fe/.tmp/cf/78de66b6f6844a82916c57320669dce7 as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/4c50796a514e2b2f0c40752ad11371fe/cf/78de66b6f6844a82916c57320669dce7 2024-12-12T20:38:46,788 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/4c50796a514e2b2f0c40752ad11371fe/cf/78de66b6f6844a82916c57320669dce7, entries=45, sequenceid=6, filesize=8.0 K 2024-12-12T20:38:46,795 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(3040): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 4c50796a514e2b2f0c40752ad11371fe in 248ms, sequenceid=6, compaction requested=false 2024-12-12T20:38:46,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(2538): Flush status journal for 4c50796a514e2b2f0c40752ad11371fe: 2024-12-12T20:38:46,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe. for snaptb0-testConsecutiveExports completed. 2024-12-12T20:38:46,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-12T20:38:46,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:38:46,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/4c50796a514e2b2f0c40752ad11371fe/cf/78de66b6f6844a82916c57320669dce7] hfiles 2024-12-12T20:38:46,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/4c50796a514e2b2f0c40752ad11371fe/cf/78de66b6f6844a82916c57320669dce7 for snapshot=snaptb0-testConsecutiveExports 2024-12-12T20:38:46,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-12T20:38:46,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742024_1200 (size=107) 2024-12-12T20:38:46,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742024_1200 (size=107) 2024-12-12T20:38:46,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742024_1200 (size=107) 2024-12-12T20:38:46,883 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe. 
2024-12-12T20:38:46,883 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=91 2024-12-12T20:38:46,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=91 2024-12-12T20:38:46,888 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 4c50796a514e2b2f0c40752ad11371fe 2024-12-12T20:38:46,888 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 4c50796a514e2b2f0c40752ad11371fe 2024-12-12T20:38:46,900 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, ppid=89, state=SUCCESS; SnapshotRegionProcedure 4c50796a514e2b2f0c40752ad11371fe in 509 msec 2024-12-12T20:38:47,112 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/df840831079a0010c945d68e2d3f2c51/.tmp/cf/41bc225f132d4b76a76c086f1d990a1b 2024-12-12T20:38:47,135 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/df840831079a0010c945d68e2d3f2c51/.tmp/cf/41bc225f132d4b76a76c086f1d990a1b as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/df840831079a0010c945d68e2d3f2c51/cf/41bc225f132d4b76a76c086f1d990a1b 2024-12-12T20:38:47,183 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/df840831079a0010c945d68e2d3f2c51/cf/41bc225f132d4b76a76c086f1d990a1b, entries=5, sequenceid=6, filesize=5.3 K 2024-12-12T20:38:47,190 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for df840831079a0010c945d68e2d3f2c51 in 640ms, sequenceid=6, compaction requested=false 2024-12-12T20:38:47,190 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for df840831079a0010c945d68e2d3f2c51: 2024-12-12T20:38:47,190 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51. for snaptb0-testConsecutiveExports completed. 2024-12-12T20:38:47,190 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-12T20:38:47,190 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:38:47,190 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/df840831079a0010c945d68e2d3f2c51/cf/41bc225f132d4b76a76c086f1d990a1b] hfiles 2024-12-12T20:38:47,190 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/df840831079a0010c945d68e2d3f2c51/cf/41bc225f132d4b76a76c086f1d990a1b for snapshot=snaptb0-testConsecutiveExports 2024-12-12T20:38:47,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742025_1201 (size=107) 2024-12-12T20:38:47,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742025_1201 (size=107) 2024-12-12T20:38:47,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742025_1201 (size=107) 2024-12-12T20:38:47,295 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51. 
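(Note: the two memstore flushes and region-manifest writes above, pid=91 and pid=90, are the region-server half of a flush-type snapshot; the log does not show how the snapshot was requested. Below is a minimal, hypothetical sketch of the standard HBase client call that triggers such a SnapshotProcedure. The snapshot and table names are taken from the log; the connection boilerplate is assumed.)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Requests a flush-type snapshot: the master runs a SnapshotProcedure that
      // flushes each region (pid=90/91 above) and then consolidates the manifest.
      admin.snapshot("snaptb0-testConsecutiveExports",
          TableName.valueOf("testtb-testConsecutiveExports"));
    }
  }
}

(Admin.snapshot blocks until the master-side procedure completes, which is what the repeated "Checking to see if procedure is done pid=89" polling above corresponds to.)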
2024-12-12T20:38:47,295 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-12-12T20:38:47,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-12-12T20:38:47,300 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region df840831079a0010c945d68e2d3f2c51 2024-12-12T20:38:47,303 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure df840831079a0010c945d68e2d3f2c51 2024-12-12T20:38:47,311 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-12-12T20:38:47,312 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; SnapshotRegionProcedure df840831079a0010c945d68e2d3f2c51 in 923 msec 2024-12-12T20:38:47,312 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T20:38:47,319 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T20:38:47,321 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T20:38:47,321 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-12-12T20:38:47,322 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-12T20:38:47,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-12T20:38:47,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742026_1202 (size=621) 2024-12-12T20:38:47,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742026_1202 (size=621) 2024-12-12T20:38:47,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742026_1202 (size=621) 2024-12-12T20:38:47,443 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports 
type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T20:38:47,461 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T20:38:47,466 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-12T20:38:47,475 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T20:38:47,475 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 89 2024-12-12T20:38:47,484 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 1.2220 sec 2024-12-12T20:38:47,900 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T20:38:48,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-12T20:38:48,376 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports, procId: 89 completed 2024-12-12T20:38:48,377 INFO [Time-limited test {}] snapshot.TestExportSnapshot(476): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734035928377 2024-12-12T20:38:48,377 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734035928377, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734035928377, srcFsUri=hdfs://localhost:37615, srcDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:38:48,419 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:37615, inputRoot=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:38:48,419 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@1c2405a5, 
outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734035928377, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734035928377/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-12T20:38:48,422 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-12T20:38:48,435 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734035928377/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-12T20:38:48,495 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-12T20:38:48,496 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-12T20:38:48,497 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-12T20:38:48,497 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-12T20:38:49,299 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-12T20:38:49,512 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop-17866020607424956671.jar 2024-12-12T20:38:49,512 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-12T20:38:49,513 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-12T20:38:49,589 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop-15926994140061002762.jar 2024-12-12T20:38:49,590 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-12T20:38:49,590 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-12T20:38:49,590 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-12T20:38:49,591 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-12T20:38:49,591 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-12T20:38:49,592 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-12T20:38:49,592 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-12T20:38:49,592 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 
2024-12-12T20:38:49,593 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-12T20:38:49,593 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-12T20:38:49,593 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-12T20:38:49,594 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-12T20:38:49,594 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-12T20:38:49,595 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-12T20:38:49,595 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-12T20:38:49,595 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-12T20:38:49,596 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-12T20:38:49,596 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-12T20:38:49,596 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:38:49,597 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:38:49,597 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:38:49,597 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:38:49,598 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:38:49,598 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:38:49,598 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:38:49,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742027_1203 (size=127628) 2024-12-12T20:38:49,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742027_1203 (size=127628) 2024-12-12T20:38:49,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742027_1203 (size=127628) 2024-12-12T20:38:49,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742028_1204 (size=2172101) 2024-12-12T20:38:49,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742028_1204 (size=2172101) 2024-12-12T20:38:49,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742028_1204 (size=2172101) 2024-12-12T20:38:49,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742029_1205 (size=213228) 2024-12-12T20:38:49,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742029_1205 (size=213228) 2024-12-12T20:38:49,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742029_1205 (size=213228) 2024-12-12T20:38:49,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742030_1206 (size=1877034) 2024-12-12T20:38:49,723 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742030_1206 (size=1877034) 2024-12-12T20:38:49,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742030_1206 (size=1877034) 2024-12-12T20:38:49,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742031_1207 (size=533455) 2024-12-12T20:38:49,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742031_1207 (size=533455) 2024-12-12T20:38:49,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742031_1207 (size=533455) 2024-12-12T20:38:49,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742032_1208 (size=7280644) 2024-12-12T20:38:49,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742032_1208 (size=7280644) 2024-12-12T20:38:49,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742032_1208 (size=7280644) 2024-12-12T20:38:49,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742033_1209 (size=4188619) 2024-12-12T20:38:49,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742033_1209 (size=4188619) 2024-12-12T20:38:49,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742033_1209 (size=4188619) 2024-12-12T20:38:50,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742034_1210 (size=20406) 2024-12-12T20:38:50,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742034_1210 (size=20406) 2024-12-12T20:38:50,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742034_1210 (size=20406) 2024-12-12T20:38:50,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742035_1211 (size=6350857) 2024-12-12T20:38:50,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742035_1211 (size=6350857) 2024-12-12T20:38:50,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742035_1211 (size=6350857) 2024-12-12T20:38:50,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742036_1212 (size=75495) 2024-12-12T20:38:50,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742036_1212 (size=75495) 2024-12-12T20:38:50,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742036_1212 (size=75495) 2024-12-12T20:38:50,316 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742037_1213 (size=45609) 2024-12-12T20:38:50,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742037_1213 (size=45609) 2024-12-12T20:38:50,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742037_1213 (size=45609) 2024-12-12T20:38:50,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742038_1214 (size=110084) 2024-12-12T20:38:50,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742038_1214 (size=110084) 2024-12-12T20:38:50,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742038_1214 (size=110084) 2024-12-12T20:38:50,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742039_1215 (size=1323991) 2024-12-12T20:38:50,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742039_1215 (size=1323991) 2024-12-12T20:38:50,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742039_1215 (size=1323991) 2024-12-12T20:38:50,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742040_1216 (size=23076) 2024-12-12T20:38:50,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742040_1216 (size=23076) 2024-12-12T20:38:50,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742040_1216 (size=23076) 2024-12-12T20:38:50,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742041_1217 (size=126803) 2024-12-12T20:38:50,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742041_1217 (size=126803) 2024-12-12T20:38:50,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742041_1217 (size=126803) 2024-12-12T20:38:50,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742042_1218 (size=322274) 2024-12-12T20:38:50,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742042_1218 (size=322274) 2024-12-12T20:38:50,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742042_1218 (size=322274) 2024-12-12T20:38:50,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742043_1219 (size=1832290) 2024-12-12T20:38:50,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742043_1219 (size=1832290) 2024-12-12T20:38:50,403 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742043_1219 (size=1832290) 2024-12-12T20:38:50,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742044_1220 (size=30081) 2024-12-12T20:38:50,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742044_1220 (size=30081) 2024-12-12T20:38:50,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742044_1220 (size=30081) 2024-12-12T20:38:50,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742045_1221 (size=53616) 2024-12-12T20:38:50,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742045_1221 (size=53616) 2024-12-12T20:38:50,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742045_1221 (size=53616) 2024-12-12T20:38:50,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742046_1222 (size=29229) 2024-12-12T20:38:50,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742046_1222 (size=29229) 2024-12-12T20:38:50,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742046_1222 (size=29229) 2024-12-12T20:38:50,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742047_1223 (size=169089) 2024-12-12T20:38:50,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742047_1223 (size=169089) 2024-12-12T20:38:50,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742047_1223 (size=169089) 2024-12-12T20:38:50,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742048_1224 (size=451756) 2024-12-12T20:38:50,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742048_1224 (size=451756) 2024-12-12T20:38:50,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742048_1224 (size=451756) 2024-12-12T20:38:51,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-12T20:38:51,126 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-12-12T20:38:51,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-12T20:38:51,510 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742049_1225 (size=5175431) 2024-12-12T20:38:51,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742049_1225 (size=5175431) 2024-12-12T20:38:51,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742049_1225 (size=5175431) 2024-12-12T20:38:51,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742050_1226 (size=136454) 2024-12-12T20:38:51,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742050_1226 (size=136454) 2024-12-12T20:38:51,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742050_1226 (size=136454) 2024-12-12T20:38:51,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742051_1227 (size=907861) 2024-12-12T20:38:51,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742051_1227 (size=907861) 2024-12-12T20:38:51,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742051_1227 (size=907861) 2024-12-12T20:38:51,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742052_1228 (size=3317408) 2024-12-12T20:38:51,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742052_1228 (size=3317408) 2024-12-12T20:38:51,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742052_1228 (size=3317408) 2024-12-12T20:38:51,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742053_1229 (size=503880) 2024-12-12T20:38:51,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742053_1229 (size=503880) 2024-12-12T20:38:51,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742053_1229 (size=503880) 2024-12-12T20:38:51,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742054_1230 (size=4695811) 2024-12-12T20:38:51,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742054_1230 (size=4695811) 2024-12-12T20:38:51,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742054_1230 (size=4695811) 2024-12-12T20:38:51,800 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
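(The run of "For class X, using jar Y" DEBUG lines above comes from TableMapReduceUtil resolving, for each class the ExportSnapshot MapReduce job needs, the jar that provides it and registering that jar with the job; the long run of addStoredBlock entries above appears to be those jars being copied into HDFS for the job's distributed cache at submission time. A minimal, hypothetical sketch of that dependency-shipping step is below; only TableMapReduceUtil.addDependencyJars is taken from the log, the job name is made up.)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "snapshot-export-sketch"); // hypothetical job name
    // Resolves the jar backing each required class and adds it to the job's
    // classpath; this is what produces the "For class X, using jar Y" DEBUG lines.
    TableMapReduceUtil.addDependencyJars(job);
  }
}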
2024-12-12T20:38:51,808 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-12T20:38:51,813 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-12T20:38:51,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742055_1231 (size=338) 2024-12-12T20:38:51,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742055_1231 (size=338) 2024-12-12T20:38:51,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742055_1231 (size=338) 2024-12-12T20:38:51,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742056_1232 (size=15) 2024-12-12T20:38:51,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742056_1232 (size=15) 2024-12-12T20:38:51,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742056_1232 (size=15) 2024-12-12T20:38:51,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742057_1233 (size=304934) 2024-12-12T20:38:51,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742057_1233 (size=304934) 2024-12-12T20:38:51,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742057_1233 (size=304934) 2024-12-12T20:38:52,020 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T20:38:52,020 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-12T20:38:52,263 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0004_000001 (auth:SIMPLE) from 127.0.0.1:57412 2024-12-12T20:38:56,629 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T20:39:01,796 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0004_000001 (auth:SIMPLE) from 127.0.0.1:48176 2024-12-12T20:39:02,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742058_1234 (size=350608) 2024-12-12T20:39:02,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742058_1234 (size=350608) 2024-12-12T20:39:02,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742058_1234 (size=350608) 2024-12-12T20:39:04,444 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0004_000001 (auth:SIMPLE) from 127.0.0.1:44910 2024-12-12T20:39:11,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742059_1235 (size=17447) 2024-12-12T20:39:11,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742059_1235 (size=17447) 2024-12-12T20:39:11,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742059_1235 (size=17447) 2024-12-12T20:39:11,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742060_1236 (size=462) 2024-12-12T20:39:11,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742060_1236 (size=462) 2024-12-12T20:39:11,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742060_1236 (size=462) 2024-12-12T20:39:11,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742061_1237 (size=17447) 2024-12-12T20:39:11,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742061_1237 (size=17447) 2024-12-12T20:39:11,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742061_1237 (size=17447) 2024-12-12T20:39:11,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742062_1238 (size=350608) 2024-12-12T20:39:11,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742062_1238 (size=350608) 2024-12-12T20:39:11,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742062_1238 (size=350608) 2024-12-12T20:39:13,619 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot 
Export 2024-12-12T20:39:13,619 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-12T20:39:13,632 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testConsecutiveExports 2024-12-12T20:39:13,632 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-12T20:39:13,632 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-12T20:39:13,632 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1165987477_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-12T20:39:13,657 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-12T20:39:13,657 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-12T20:39:13,657 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in org.apache.hadoop.fs.LocalFileSystem@1c2405a5 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734035928377/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734035928377/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-12T20:39:13,659 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734035928377/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-12T20:39:13,659 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734035928377/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-12T20:39:13,665 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734035928377, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734035928377, srcFsUri=hdfs://localhost:37615, srcDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:39:13,724 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:37615, inputRoot=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:39:13,724 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): 
outputFs=org.apache.hadoop.fs.LocalFileSystem@1c2405a5, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734035928377, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734035928377/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-12T20:39:13,743 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-12T20:39:13,770 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734035928377/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-12T20:39:13,849 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-12T20:39:13,849 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-12T20:39:13,849 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-12T20:39:13,850 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-12T20:39:15,073 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop-13176200255284214603.jar 2024-12-12T20:39:15,073 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-12T20:39:15,074 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-12T20:39:15,154 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop-17265678661156019484.jar 2024-12-12T20:39:15,155 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-12T20:39:15,155 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-12T20:39:15,156 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-12T20:39:15,156 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-12T20:39:15,156 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-12T20:39:15,157 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-12T20:39:15,157 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-12T20:39:15,157 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-12T20:39:15,158 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-12T20:39:15,158 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-12T20:39:15,158 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-12T20:39:15,159 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-12T20:39:15,159 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-12T20:39:15,163 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-12T20:39:15,164 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-12T20:39:15,164 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-12T20:39:15,165 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-12T20:39:15,165 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-12T20:39:15,165 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:39:15,166 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:39:15,166 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:39:15,166 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:39:15,166 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:39:15,167 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 
2024-12-12T20:39:15,167 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:39:15,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742063_1239 (size=127628) 2024-12-12T20:39:15,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742063_1239 (size=127628) 2024-12-12T20:39:15,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742063_1239 (size=127628) 2024-12-12T20:39:15,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742064_1240 (size=2172101) 2024-12-12T20:39:15,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742064_1240 (size=2172101) 2024-12-12T20:39:15,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742064_1240 (size=2172101) 2024-12-12T20:39:15,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742065_1241 (size=213228) 2024-12-12T20:39:15,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742065_1241 (size=213228) 2024-12-12T20:39:15,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742065_1241 (size=213228) 2024-12-12T20:39:15,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742066_1242 (size=1877034) 2024-12-12T20:39:15,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742066_1242 (size=1877034) 2024-12-12T20:39:15,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742066_1242 (size=1877034) 2024-12-12T20:39:15,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742067_1243 (size=533455) 2024-12-12T20:39:15,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742067_1243 (size=533455) 2024-12-12T20:39:15,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742067_1243 (size=533455) 2024-12-12T20:39:16,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742068_1244 (size=7280644) 2024-12-12T20:39:16,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742068_1244 (size=7280644) 2024-12-12T20:39:16,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742068_1244 (size=7280644) 
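Editor's note: the `mapreduce.TableMapReduceUtil(923): For class X, using jar Y` DEBUG entries above record the test resolving, for each class its MapReduce job needs, the jar that provides it before shipping those jars with the job. The following is a minimal, hedged sketch of how that dependency shipping is typically triggered through the public API; it is not the test's actual code, and the job name is a placeholder.

```java
// Minimal sketch (not the test's code): asking TableMapReduceUtil to ship the
// HBase/ZooKeeper/protobuf/metrics jars a job needs. Resolving each class to
// its jar is what produces "For class X, using jar Y" DEBUG lines like those above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "example-job"); // job name is a placeholder
    // Adds the jars backing the job's HBase and Hadoop dependencies to the
    // job's distributed cache so task JVMs can load them.
    TableMapReduceUtil.addDependencyJars(job);
  }
}
```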
2024-12-12T20:39:16,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742069_1245 (size=4188619) 2024-12-12T20:39:16,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742069_1245 (size=4188619) 2024-12-12T20:39:16,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742069_1245 (size=4188619) 2024-12-12T20:39:16,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742070_1246 (size=20406) 2024-12-12T20:39:16,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742070_1246 (size=20406) 2024-12-12T20:39:16,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742070_1246 (size=20406) 2024-12-12T20:39:16,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742071_1247 (size=75495) 2024-12-12T20:39:16,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742071_1247 (size=75495) 2024-12-12T20:39:16,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742071_1247 (size=75495) 2024-12-12T20:39:16,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742072_1248 (size=45609) 2024-12-12T20:39:16,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742072_1248 (size=45609) 2024-12-12T20:39:16,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742072_1248 (size=45609) 2024-12-12T20:39:16,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742073_1249 (size=110084) 2024-12-12T20:39:16,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742073_1249 (size=110084) 2024-12-12T20:39:16,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742073_1249 (size=110084) 2024-12-12T20:39:16,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742074_1250 (size=1323991) 2024-12-12T20:39:16,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742074_1250 (size=1323991) 2024-12-12T20:39:16,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742074_1250 (size=1323991) 2024-12-12T20:39:16,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742075_1251 (size=23076) 2024-12-12T20:39:16,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742075_1251 (size=23076) 
2024-12-12T20:39:16,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742075_1251 (size=23076) 2024-12-12T20:39:16,398 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_3/usercache/jenkins/appcache/application_1734035798001_0004/container_1734035798001_0004_01_000002/launch_container.sh] 2024-12-12T20:39:16,398 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_3/usercache/jenkins/appcache/application_1734035798001_0004/container_1734035798001_0004_01_000002/container_tokens] 2024-12-12T20:39:16,398 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_3/usercache/jenkins/appcache/application_1734035798001_0004/container_1734035798001_0004_01_000002/sysfs] 2024-12-12T20:39:16,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742076_1252 (size=126803) 2024-12-12T20:39:16,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742076_1252 (size=126803) 2024-12-12T20:39:16,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742076_1252 (size=126803) 2024-12-12T20:39:16,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742077_1253 (size=322274) 2024-12-12T20:39:16,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742077_1253 (size=322274) 2024-12-12T20:39:16,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742077_1253 (size=322274) 2024-12-12T20:39:16,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742078_1254 (size=1832290) 2024-12-12T20:39:17,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742078_1254 (size=1832290) 2024-12-12T20:39:17,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742078_1254 (size=1832290) 2024-12-12T20:39:17,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742079_1255 (size=30081) 2024-12-12T20:39:17,091 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742079_1255 (size=30081) 2024-12-12T20:39:17,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742079_1255 (size=30081) 2024-12-12T20:39:17,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742080_1256 (size=53616) 2024-12-12T20:39:17,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742080_1256 (size=53616) 2024-12-12T20:39:17,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742080_1256 (size=53616) 2024-12-12T20:39:17,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742081_1257 (size=29229) 2024-12-12T20:39:17,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742081_1257 (size=29229) 2024-12-12T20:39:17,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742081_1257 (size=29229) 2024-12-12T20:39:17,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742082_1258 (size=169089) 2024-12-12T20:39:17,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742082_1258 (size=169089) 2024-12-12T20:39:17,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742082_1258 (size=169089) 2024-12-12T20:39:17,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742083_1259 (size=6350857) 2024-12-12T20:39:17,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742083_1259 (size=6350857) 2024-12-12T20:39:17,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742083_1259 (size=6350857) 2024-12-12T20:39:17,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742084_1260 (size=451756) 2024-12-12T20:39:17,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742084_1260 (size=451756) 2024-12-12T20:39:17,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742084_1260 (size=451756) 2024-12-12T20:39:18,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742085_1261 (size=5175431) 2024-12-12T20:39:18,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742085_1261 (size=5175431) 2024-12-12T20:39:18,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742085_1261 (size=5175431) 2024-12-12T20:39:18,341 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742086_1262 (size=136454) 2024-12-12T20:39:18,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742086_1262 (size=136454) 2024-12-12T20:39:18,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742086_1262 (size=136454) 2024-12-12T20:39:18,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742087_1263 (size=907861) 2024-12-12T20:39:18,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742087_1263 (size=907861) 2024-12-12T20:39:18,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742087_1263 (size=907861) 2024-12-12T20:39:18,398 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0004_000001 (auth:SIMPLE) from 127.0.0.1:33694 2024-12-12T20:39:18,414 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_3/usercache/jenkins/appcache/application_1734035798001_0004/container_1734035798001_0004_01_000001/launch_container.sh] 2024-12-12T20:39:18,414 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_3/usercache/jenkins/appcache/application_1734035798001_0004/container_1734035798001_0004_01_000001/container_tokens] 2024-12-12T20:39:18,414 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_3/usercache/jenkins/appcache/application_1734035798001_0004/container_1734035798001_0004_01_000001/sysfs] 2024-12-12T20:39:18,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742088_1264 (size=3317408) 2024-12-12T20:39:18,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742088_1264 (size=3317408) 2024-12-12T20:39:18,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742088_1264 (size=3317408) 2024-12-12T20:39:18,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742089_1265 (size=503880) 2024-12-12T20:39:18,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is 
added to blk_1073742089_1265 (size=503880) 2024-12-12T20:39:18,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742089_1265 (size=503880) 2024-12-12T20:39:18,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742090_1266 (size=4695811) 2024-12-12T20:39:18,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742090_1266 (size=4695811) 2024-12-12T20:39:18,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742090_1266 (size=4695811) 2024-12-12T20:39:18,547 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-12T20:39:18,551 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-12T20:39:18,557 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-12T20:39:18,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742091_1267 (size=338) 2024-12-12T20:39:18,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742091_1267 (size=338) 2024-12-12T20:39:18,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742091_1267 (size=338) 2024-12-12T20:39:18,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742092_1268 (size=15) 2024-12-12T20:39:18,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742092_1268 (size=15) 2024-12-12T20:39:18,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742092_1268 (size=15) 2024-12-12T20:39:18,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742093_1269 (size=304934) 2024-12-12T20:39:18,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742093_1269 (size=304934) 2024-12-12T20:39:18,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742093_1269 (size=304934) 2024-12-12T20:39:18,629 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T20:39:18,629 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-12T20:39:18,752 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0005_000001 (auth:SIMPLE) from 127.0.0.1:50728 2024-12-12T20:39:19,300 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T20:39:27,525 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0005_000001 (auth:SIMPLE) from 127.0.0.1:38172 2024-12-12T20:39:27,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742094_1270 (size=350608) 2024-12-12T20:39:27,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742094_1270 (size=350608) 2024-12-12T20:39:27,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742094_1270 (size=350608) 2024-12-12T20:39:28,397 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region df840831079a0010c945d68e2d3f2c51, had cached 0 bytes from a total of 5422 2024-12-12T20:39:28,412 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 4c50796a514e2b2f0c40752ad11371fe, had cached 0 bytes from a total of 8188 2024-12-12T20:39:28,418 DEBUG [master/0a737713f6e3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 4c50796a514e2b2f0c40752ad11371fe changed from -1.0 to 0.0, refreshing cache 2024-12-12T20:39:28,418 DEBUG [master/0a737713f6e3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region df840831079a0010c945d68e2d3f2c51 changed from -1.0 to 0.0, refreshing cache 2024-12-12T20:39:30,287 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0005_000001 (auth:SIMPLE) from 127.0.0.1:45116 2024-12-12T20:39:38,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742095_1271 (size=16925) 2024-12-12T20:39:38,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742095_1271 (size=16925) 2024-12-12T20:39:38,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742095_1271 (size=16925) 2024-12-12T20:39:38,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742096_1272 (size=462) 2024-12-12T20:39:38,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742096_1272 (size=462) 2024-12-12T20:39:38,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742096_1272 (size=462) 2024-12-12T20:39:38,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742097_1273 (size=16925) 2024-12-12T20:39:38,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36349 is added to blk_1073742097_1273 (size=16925) 2024-12-12T20:39:38,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742097_1273 (size=16925) 2024-12-12T20:39:38,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742098_1274 (size=350608) 2024-12-12T20:39:38,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742098_1274 (size=350608) 2024-12-12T20:39:38,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742098_1274 (size=350608) 2024-12-12T20:39:38,484 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0005_000001 (auth:SIMPLE) from 127.0.0.1:45130 2024-12-12T20:39:40,272 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-12T20:39:40,273 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-12T20:39:40,296 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testConsecutiveExports 2024-12-12T20:39:40,297 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-12T20:39:40,297 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-12T20:39:40,297 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1165987477_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-12T20:39:40,299 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-12T20:39:40,299 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-12T20:39:40,299 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in org.apache.hadoop.fs.LocalFileSystem@1c2405a5 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734035928377/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734035928377/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-12T20:39:40,299 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734035928377/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-12T20:39:40,299 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734035928377/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-12T20:39:40,332 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testConsecutiveExports 2024-12-12T20:39:40,336 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-12-12T20:39:40,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=92, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testConsecutiveExports 2024-12-12T20:39:40,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-12T20:39:40,376 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035980375"}]},"ts":"1734035980375"} 2024-12-12T20:39:40,388 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-12-12T20:39:40,415 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-12-12T20:39:40,425 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-12-12T20:39:40,447 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=df840831079a0010c945d68e2d3f2c51, UNASSIGN}, {pid=95, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=4c50796a514e2b2f0c40752ad11371fe, UNASSIGN}] 2024-12-12T20:39:40,456 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=4c50796a514e2b2f0c40752ad11371fe, UNASSIGN 2024-12-12T20:39:40,459 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=94, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=df840831079a0010c945d68e2d3f2c51, UNASSIGN 2024-12-12T20:39:40,460 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=4c50796a514e2b2f0c40752ad11371fe, regionState=CLOSING, regionLocation=0a737713f6e3,36817,1734035782356 2024-12-12T20:39:40,461 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=94 updating hbase:meta row=df840831079a0010c945d68e2d3f2c51, regionState=CLOSING, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:39:40,470 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T20:39:40,470 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; CloseRegionProcedure 4c50796a514e2b2f0c40752ad11371fe, 
server=0a737713f6e3,36817,1734035782356}] 2024-12-12T20:39:40,472 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T20:39:40,472 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=94, state=RUNNABLE; CloseRegionProcedure df840831079a0010c945d68e2d3f2c51, server=0a737713f6e3,46859,1734035782031}] 2024-12-12T20:39:40,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-12T20:39:40,623 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:39:40,625 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(124): Close 4c50796a514e2b2f0c40752ad11371fe 2024-12-12T20:39:40,625 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T20:39:40,626 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1681): Closing 4c50796a514e2b2f0c40752ad11371fe, disabling compactions & flushes 2024-12-12T20:39:40,626 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe. 2024-12-12T20:39:40,626 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe. 2024-12-12T20:39:40,626 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe. after waiting 0 ms 2024-12-12T20:39:40,626 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe. 2024-12-12T20:39:40,626 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:39:40,631 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(124): Close df840831079a0010c945d68e2d3f2c51 2024-12-12T20:39:40,631 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T20:39:40,632 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1681): Closing df840831079a0010c945d68e2d3f2c51, disabling compactions & flushes 2024-12-12T20:39:40,632 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51. 
2024-12-12T20:39:40,632 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51. 2024-12-12T20:39:40,632 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51. after waiting 0 ms 2024-12-12T20:39:40,632 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51. 2024-12-12T20:39:40,667 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/4c50796a514e2b2f0c40752ad11371fe/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T20:39:40,667 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/df840831079a0010c945d68e2d3f2c51/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T20:39:40,675 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:39:40,675 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe. 2024-12-12T20:39:40,676 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1635): Region close journal for 4c50796a514e2b2f0c40752ad11371fe: 2024-12-12T20:39:40,678 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:39:40,678 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51. 
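Editor's note: the entries at 20:39:40,272 through 20:39:40,297 above record ExportSnapshot finalizing, verifying, and completing the export of 'snaptb0-testConsecutiveExports' to a local-filesystem target. As an illustration only, here is a minimal sketch of driving such an export through the public ExportSnapshot tool; the destination path and the ToolRunner wiring are assumptions, not taken from this test run.

```java
// Minimal sketch (assumed invocation, not the test's code) of exporting a
// snapshot to another filesystem, the operation whose progress appears above.
// Assumes ExportSnapshot is runnable as a Hadoop Tool; the -copy-to path is a placeholder.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Equivalent CLI form:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //       -snapshot snaptb0-testConsecutiveExports -copy-to file:///tmp/local-export
    int rc = ToolRunner.run(conf, new ExportSnapshot(),
        new String[] { "-snapshot", "snaptb0-testConsecutiveExports",
                       "-copy-to", "file:///tmp/local-export" });
    System.exit(rc);
  }
}
```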
2024-12-12T20:39:40,678 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1635): Region close journal for df840831079a0010c945d68e2d3f2c51: 2024-12-12T20:39:40,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-12T20:39:40,681 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(170): Closed 4c50796a514e2b2f0c40752ad11371fe 2024-12-12T20:39:40,683 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=4c50796a514e2b2f0c40752ad11371fe, regionState=CLOSED 2024-12-12T20:39:40,691 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(170): Closed df840831079a0010c945d68e2d3f2c51 2024-12-12T20:39:40,693 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=94 updating hbase:meta row=df840831079a0010c945d68e2d3f2c51, regionState=CLOSED 2024-12-12T20:39:40,702 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-12-12T20:39:40,702 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; CloseRegionProcedure 4c50796a514e2b2f0c40752ad11371fe, server=0a737713f6e3,36817,1734035782356 in 222 msec 2024-12-12T20:39:40,715 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=93, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=4c50796a514e2b2f0c40752ad11371fe, UNASSIGN in 255 msec 2024-12-12T20:39:40,718 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=94 2024-12-12T20:39:40,718 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=94, state=SUCCESS; CloseRegionProcedure df840831079a0010c945d68e2d3f2c51, server=0a737713f6e3,46859,1734035782031 in 238 msec 2024-12-12T20:39:40,722 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-12-12T20:39:40,722 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=df840831079a0010c945d68e2d3f2c51, UNASSIGN in 271 msec 2024-12-12T20:39:40,737 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035980737"}]},"ts":"1734035980737"} 2024-12-12T20:39:40,738 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-12-12T20:39:40,738 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 298 msec 2024-12-12T20:39:40,739 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-12-12T20:39:40,789 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-12-12T20:39:40,800 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, state=SUCCESS; DisableTableProcedure table=testtb-testConsecutiveExports in 457 msec 2024-12-12T20:39:40,983 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-12T20:39:40,984 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports, procId: 92 completed 2024-12-12T20:39:40,989 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-12-12T20:39:40,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-12T20:39:40,992 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=98, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-12T20:39:40,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testConsecutiveExports 2024-12-12T20:39:40,994 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=98, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-12T20:39:40,995 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46859 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-12-12T20:39:41,008 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/df840831079a0010c945d68e2d3f2c51 2024-12-12T20:39:41,011 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/4c50796a514e2b2f0c40752ad11371fe 2024-12-12T20:39:41,029 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/df840831079a0010c945d68e2d3f2c51/cf, FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/df840831079a0010c945d68e2d3f2c51/recovered.edits] 2024-12-12T20:39:41,035 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/4c50796a514e2b2f0c40752ad11371fe/cf, FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/4c50796a514e2b2f0c40752ad11371fe/recovered.edits] 2024-12-12T20:39:41,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-12T20:39:41,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, 
path=/hbase/acl/testtb-testConsecutiveExports 2024-12-12T20:39:41,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-12T20:39:41,072 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-12T20:39:41,072 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-12T20:39:41,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-12T20:39:41,091 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/df840831079a0010c945d68e2d3f2c51/cf/41bc225f132d4b76a76c086f1d990a1b to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testConsecutiveExports/df840831079a0010c945d68e2d3f2c51/cf/41bc225f132d4b76a76c086f1d990a1b 2024-12-12T20:39:41,092 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/4c50796a514e2b2f0c40752ad11371fe/cf/78de66b6f6844a82916c57320669dce7 to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testConsecutiveExports/4c50796a514e2b2f0c40752ad11371fe/cf/78de66b6f6844a82916c57320669dce7 2024-12-12T20:39:41,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:39:41,111 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data null 2024-12-12T20:39:41,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:39:41,111 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data null 2024-12-12T20:39:41,111 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-12T20:39:41,111 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-12T20:39:41,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-12T20:39:41,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, 
quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:39:41,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-12T20:39:41,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-12T20:39:41,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:39:41,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-12T20:39:41,147 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/4c50796a514e2b2f0c40752ad11371fe/recovered.edits/9.seqid to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testConsecutiveExports/4c50796a514e2b2f0c40752ad11371fe/recovered.edits/9.seqid 2024-12-12T20:39:41,156 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/4c50796a514e2b2f0c40752ad11371fe 2024-12-12T20:39:41,157 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/df840831079a0010c945d68e2d3f2c51/recovered.edits/9.seqid to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testConsecutiveExports/df840831079a0010c945d68e2d3f2c51/recovered.edits/9.seqid 2024-12-12T20:39:41,160 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testConsecutiveExports/df840831079a0010c945d68e2d3f2c51 2024-12-12T20:39:41,160 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-12-12T20:39:41,169 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=98, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-12T20:39:41,184 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-12-12T20:39:41,196 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testConsecutiveExports' descriptor. 
2024-12-12T20:39:41,204 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=98, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-12T20:39:41,204 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testConsecutiveExports' from region states. 2024-12-12T20:39:41,205 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734035981204"}]},"ts":"9223372036854775807"} 2024-12-12T20:39:41,205 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734035981204"}]},"ts":"9223372036854775807"} 2024-12-12T20:39:41,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-12T20:39:41,224 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-12T20:39:41,224 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => df840831079a0010c945d68e2d3f2c51, NAME => 'testtb-testConsecutiveExports,,1734035922543.df840831079a0010c945d68e2d3f2c51.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 4c50796a514e2b2f0c40752ad11371fe, NAME => 'testtb-testConsecutiveExports,1,1734035922543.4c50796a514e2b2f0c40752ad11371fe.', STARTKEY => '1', ENDKEY => ''}] 2024-12-12T20:39:41,224 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testConsecutiveExports' as deleted. 
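Editor's note: the DisableTableProcedure and DeleteTableProcedure entries above, and the snapshot deletions recorded just below, are driven by client-side Admin calls in the test harness. A minimal sketch of the equivalent public Admin API follows; the connection setup is a placeholder and only the table and snapshot names are copied from the log.

```java
// Minimal sketch (assumed client-side equivalent, not the test's code) of the
// Admin calls behind the disable/delete procedures and the snapshot deletions
// seen in this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableAndSnapshots {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testConsecutiveExports");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.disableTable(table);   // master runs a DisableTableProcedure
      admin.deleteTable(table);    // master runs a DeleteTableProcedure, archiving HFiles
      admin.deleteSnapshot("emptySnaptb0-testConsecutiveExports");
      admin.deleteSnapshot("snaptb0-testConsecutiveExports");
    }
  }
}
```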
2024-12-12T20:39:41,224 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734035981224"}]},"ts":"9223372036854775807"} 2024-12-12T20:39:41,253 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testConsecutiveExports state from META 2024-12-12T20:39:41,277 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=98, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-12T20:39:41,287 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; DeleteTableProcedure table=testtb-testConsecutiveExports in 288 msec 2024-12-12T20:39:41,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-12T20:39:41,415 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports, procId: 98 completed 2024-12-12T20:39:41,434 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" 2024-12-12T20:39:41,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-12-12T20:39:41,438 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" 2024-12-12T20:39:41,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testConsecutiveExports 2024-12-12T20:39:41,465 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=789 (was 784) Potentially hanging thread: ForkJoinPool.commonPool-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: MoveIntermediateToDone Thread #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-24 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1165987477_22 at /127.0.0.1:57164 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-25 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41295 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
DataXceiver for client DFSClient_NONMAPREDUCE_-1165987477_22 at /127.0.0.1:39926 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1165987477_22 at /127.0.0.1:46692 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-25 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-3907 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: MoveIntermediateToDone Thread #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874193583) connection to localhost/127.0.0.1:41295 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-26 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1677217105_1 at /127.0.0.1:40482 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-24 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 65093) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=794 (was 802), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1454 (was 1844), ProcessCount=20 (was 20), AvailableMemoryMB=3353 (was 2741) - AvailableMemoryMB LEAK? 
- 2024-12-12T20:39:41,466 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=789 is superior to 500 2024-12-12T20:39:41,500 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=789, OpenFileDescriptor=794, MaxFileDescriptor=1048576, SystemLoadAverage=1454, ProcessCount=20, AvailableMemoryMB=3345 2024-12-12T20:39:41,500 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=789 is superior to 500 2024-12-12T20:39:41,513 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T20:39:41,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=99, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T20:39:41,516 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T20:39:41,517 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:39:41,524 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 99 2024-12-12T20:39:41,524 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T20:39:41,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-12T20:39:41,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742099_1275 (size=422) 2024-12-12T20:39:41,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742099_1275 (size=422) 2024-12-12T20:39:41,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742099_1275 (size=422) 2024-12-12T20:39:41,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-12T20:39:41,642 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 8aad17ad3243c93d19b0a0f31dc6ae44, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44.', STARTKEY => '', ENDKEY => '1'}, 
tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:39:41,650 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => bf7783e575074cc3a4ec1d197e979973, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:39:41,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742101_1277 (size=83) 2024-12-12T20:39:41,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742101_1277 (size=83) 2024-12-12T20:39:41,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742101_1277 (size=83) 2024-12-12T20:39:41,799 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:39:41,800 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1681): Closing bf7783e575074cc3a4ec1d197e979973, disabling compactions & flushes 2024-12-12T20:39:41,800 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973. 2024-12-12T20:39:41,800 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973. 2024-12-12T20:39:41,800 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973. 
after waiting 0 ms 2024-12-12T20:39:41,800 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973. 2024-12-12T20:39:41,800 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973. 2024-12-12T20:39:41,800 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1635): Region close journal for bf7783e575074cc3a4ec1d197e979973: 2024-12-12T20:39:41,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742100_1276 (size=83) 2024-12-12T20:39:41,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742100_1276 (size=83) 2024-12-12T20:39:41,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-12T20:39:41,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742100_1276 (size=83) 2024-12-12T20:39:41,843 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:39:41,843 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1681): Closing 8aad17ad3243c93d19b0a0f31dc6ae44, disabling compactions & flushes 2024-12-12T20:39:41,843 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44. 2024-12-12T20:39:41,843 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44. 2024-12-12T20:39:41,844 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44. after waiting 0 ms 2024-12-12T20:39:41,844 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44. 2024-12-12T20:39:41,844 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44. 
2024-12-12T20:39:41,844 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1635): Region close journal for 8aad17ad3243c93d19b0a0f31dc6ae44: 2024-12-12T20:39:41,846 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T20:39:41,847 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1734035981846"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734035981846"}]},"ts":"1734035981846"} 2024-12-12T20:39:41,847 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1734035981846"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734035981846"}]},"ts":"1734035981846"} 2024-12-12T20:39:41,857 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-12T20:39:41,867 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T20:39:41,867 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035981867"}]},"ts":"1734035981867"} 2024-12-12T20:39:41,875 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-12-12T20:39:41,902 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {0a737713f6e3=0} racks are {/default-rack=0} 2024-12-12T20:39:41,908 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-12T20:39:41,908 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-12T20:39:41,908 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-12T20:39:41,909 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-12T20:39:41,909 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-12T20:39:41,909 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-12T20:39:41,909 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T20:39:41,909 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=8aad17ad3243c93d19b0a0f31dc6ae44, ASSIGN}, {pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=bf7783e575074cc3a4ec1d197e979973, ASSIGN}] 2024-12-12T20:39:41,913 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): 
Took xlock for pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=8aad17ad3243c93d19b0a0f31dc6ae44, ASSIGN 2024-12-12T20:39:41,914 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=bf7783e575074cc3a4ec1d197e979973, ASSIGN 2024-12-12T20:39:41,915 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=8aad17ad3243c93d19b0a0f31dc6ae44, ASSIGN; state=OFFLINE, location=0a737713f6e3,36817,1734035782356; forceNewPlan=false, retain=false 2024-12-12T20:39:41,916 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=bf7783e575074cc3a4ec1d197e979973, ASSIGN; state=OFFLINE, location=0a737713f6e3,46859,1734035782031; forceNewPlan=false, retain=false 2024-12-12T20:39:42,065 INFO [0a737713f6e3:43067 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-12T20:39:42,066 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=bf7783e575074cc3a4ec1d197e979973, regionState=OPENING, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:39:42,066 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=100 updating hbase:meta row=8aad17ad3243c93d19b0a0f31dc6ae44, regionState=OPENING, regionLocation=0a737713f6e3,36817,1734035782356 2024-12-12T20:39:42,079 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=100, state=RUNNABLE; OpenRegionProcedure 8aad17ad3243c93d19b0a0f31dc6ae44, server=0a737713f6e3,36817,1734035782356}] 2024-12-12T20:39:42,081 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=101, state=RUNNABLE; OpenRegionProcedure bf7783e575074cc3a4ec1d197e979973, server=0a737713f6e3,46859,1734035782031}] 2024-12-12T20:39:42,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-12T20:39:42,233 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:39:42,235 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:39:42,247 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44. 
2024-12-12T20:39:42,248 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7285): Opening region: {ENCODED => 8aad17ad3243c93d19b0a0f31dc6ae44, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44.', STARTKEY => '', ENDKEY => '1'} 2024-12-12T20:39:42,248 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44. service=AccessControlService 2024-12-12T20:39:42,249 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T20:39:42,249 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 8aad17ad3243c93d19b0a0f31dc6ae44 2024-12-12T20:39:42,249 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:39:42,249 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7327): checking encryption for 8aad17ad3243c93d19b0a0f31dc6ae44 2024-12-12T20:39:42,249 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7330): checking classloading for 8aad17ad3243c93d19b0a0f31dc6ae44 2024-12-12T20:39:42,253 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973. 2024-12-12T20:39:42,253 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7285): Opening region: {ENCODED => bf7783e575074cc3a4ec1d197e979973, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973.', STARTKEY => '1', ENDKEY => ''} 2024-12-12T20:39:42,254 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973. service=AccessControlService 2024-12-12T20:39:42,254 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-12T20:39:42,255 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion bf7783e575074cc3a4ec1d197e979973 2024-12-12T20:39:42,255 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:39:42,255 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7327): checking encryption for bf7783e575074cc3a4ec1d197e979973 2024-12-12T20:39:42,255 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7330): checking classloading for bf7783e575074cc3a4ec1d197e979973 2024-12-12T20:39:42,263 INFO [StoreOpener-8aad17ad3243c93d19b0a0f31dc6ae44-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8aad17ad3243c93d19b0a0f31dc6ae44 2024-12-12T20:39:42,276 INFO [StoreOpener-8aad17ad3243c93d19b0a0f31dc6ae44-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8aad17ad3243c93d19b0a0f31dc6ae44 columnFamilyName cf 2024-12-12T20:39:42,276 DEBUG [StoreOpener-8aad17ad3243c93d19b0a0f31dc6ae44-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:39:42,280 INFO [StoreOpener-bf7783e575074cc3a4ec1d197e979973-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region bf7783e575074cc3a4ec1d197e979973 2024-12-12T20:39:42,283 INFO [StoreOpener-8aad17ad3243c93d19b0a0f31dc6ae44-1 {}] regionserver.HStore(327): Store=8aad17ad3243c93d19b0a0f31dc6ae44/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:39:42,291 INFO [StoreOpener-bf7783e575074cc3a4ec1d197e979973-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: 
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bf7783e575074cc3a4ec1d197e979973 columnFamilyName cf 2024-12-12T20:39:42,291 DEBUG [StoreOpener-bf7783e575074cc3a4ec1d197e979973-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:39:42,292 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/8aad17ad3243c93d19b0a0f31dc6ae44 2024-12-12T20:39:42,292 INFO [StoreOpener-bf7783e575074cc3a4ec1d197e979973-1 {}] regionserver.HStore(327): Store=bf7783e575074cc3a4ec1d197e979973/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:39:42,294 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/bf7783e575074cc3a4ec1d197e979973 2024-12-12T20:39:42,297 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/8aad17ad3243c93d19b0a0f31dc6ae44 2024-12-12T20:39:42,297 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/bf7783e575074cc3a4ec1d197e979973 2024-12-12T20:39:42,314 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1085): writing seq id for bf7783e575074cc3a4ec1d197e979973 2024-12-12T20:39:42,323 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1085): writing seq id for 8aad17ad3243c93d19b0a0f31dc6ae44 2024-12-12T20:39:42,332 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/bf7783e575074cc3a4ec1d197e979973/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:39:42,341 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1102): Opened bf7783e575074cc3a4ec1d197e979973; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69057752, jitterRate=0.02904069423675537}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 
2024-12-12T20:39:42,342 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1001): Region open journal for bf7783e575074cc3a4ec1d197e979973: 2024-12-12T20:39:42,346 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973., pid=103, masterSystemTime=1734035982235 2024-12-12T20:39:42,349 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/8aad17ad3243c93d19b0a0f31dc6ae44/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:39:42,352 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1102): Opened 8aad17ad3243c93d19b0a0f31dc6ae44; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72285805, jitterRate=0.07714243233203888}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:39:42,352 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1001): Region open journal for 8aad17ad3243c93d19b0a0f31dc6ae44: 2024-12-12T20:39:42,355 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973. 2024-12-12T20:39:42,355 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44., pid=102, masterSystemTime=1734035982233 2024-12-12T20:39:42,355 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973. 2024-12-12T20:39:42,362 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=bf7783e575074cc3a4ec1d197e979973, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:39:42,367 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44. 2024-12-12T20:39:42,367 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44. 
2024-12-12T20:39:42,367 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=100 updating hbase:meta row=8aad17ad3243c93d19b0a0f31dc6ae44, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,36817,1734035782356 2024-12-12T20:39:42,378 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=101 2024-12-12T20:39:42,379 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=101, state=SUCCESS; OpenRegionProcedure bf7783e575074cc3a4ec1d197e979973, server=0a737713f6e3,46859,1734035782031 in 289 msec 2024-12-12T20:39:42,390 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=100 2024-12-12T20:39:42,391 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=100, state=SUCCESS; OpenRegionProcedure 8aad17ad3243c93d19b0a0f31dc6ae44, server=0a737713f6e3,36817,1734035782356 in 293 msec 2024-12-12T20:39:42,395 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=99, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=bf7783e575074cc3a4ec1d197e979973, ASSIGN in 470 msec 2024-12-12T20:39:42,400 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-12-12T20:39:42,400 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=8aad17ad3243c93d19b0a0f31dc6ae44, ASSIGN in 483 msec 2024-12-12T20:39:42,411 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T20:39:42,411 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035982411"}]},"ts":"1734035982411"} 2024-12-12T20:39:42,413 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-12-12T20:39:42,448 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T20:39:42,449 DEBUG [PEWorker-4 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-12-12T20:39:42,456 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46859 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-12T20:39:42,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:39:42,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:39:42,486 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:39:42,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:39:42,541 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T20:39:42,541 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T20:39:42,541 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T20:39:42,541 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T20:39:42,545 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 1.0300 sec 2024-12-12T20:39:42,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-12T20:39:42,649 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 99 completed 2024-12-12T20:39:42,649 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportFileSystemStateWithMergeRegion get assigned. Timeout = 60000ms 2024-12-12T20:39:42,650 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:39:42,668 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned to meta. Checking AM states. 2024-12-12T20:39:42,668 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:39:42,668 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned. 
2024-12-12T20:39:42,676 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-12T20:39:42,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734035982676 (current time:1734035982676). 2024-12-12T20:39:42,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T20:39:42,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-12T20:39:42,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-12T20:39:42,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3b5ed1b0 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c78d06a 2024-12-12T20:39:42,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@310c9786, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:39:42,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:39:42,732 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34404, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:39:42,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3b5ed1b0 to 127.0.0.1:52655 2024-12-12T20:39:42,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:39:42,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2bf6c309 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5177bd9d 2024-12-12T20:39:42,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@492a092, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:39:42,861 DEBUG [hconnection-0x203754c0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:39:42,864 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34408, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-12-12T20:39:42,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2bf6c309 to 127.0.0.1:52655 2024-12-12T20:39:42,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:39:42,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-12T20:39:42,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-12T20:39:42,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-12T20:39:42,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-12T20:39:42,892 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T20:39:42,894 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T20:39:42,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-12T20:39:42,903 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T20:39:42,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742102_1278 (size=215) 2024-12-12T20:39:42,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742102_1278 (size=215) 2024-12-12T20:39:42,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742102_1278 (size=215) 2024-12-12T20:39:42,995 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion 
table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T20:39:42,996 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 8aad17ad3243c93d19b0a0f31dc6ae44}, {pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure bf7783e575074cc3a4ec1d197e979973}] 2024-12-12T20:39:43,003 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 8aad17ad3243c93d19b0a0f31dc6ae44 2024-12-12T20:39:43,003 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure bf7783e575074cc3a4ec1d197e979973 2024-12-12T20:39:43,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-12T20:39:43,159 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:39:43,160 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:39:43,164 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36817 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-12-12T20:39:43,164 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46859 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-12-12T20:39:43,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973. 2024-12-12T20:39:43,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2538): Flush status journal for bf7783e575074cc3a4ec1d197e979973: 2024-12-12T20:39:43,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-12T20:39:43,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T20:39:43,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:39:43,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T20:39:43,167 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44. 2024-12-12T20:39:43,167 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for 8aad17ad3243c93d19b0a0f31dc6ae44: 2024-12-12T20:39:43,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-12T20:39:43,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T20:39:43,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:39:43,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T20:39:43,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-12T20:39:43,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742103_1279 (size=86) 2024-12-12T20:39:43,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742103_1279 (size=86) 2024-12-12T20:39:43,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742103_1279 (size=86) 2024-12-12T20:39:43,235 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973. 
2024-12-12T20:39:43,235 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-12-12T20:39:43,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=106 2024-12-12T20:39:43,236 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region bf7783e575074cc3a4ec1d197e979973 2024-12-12T20:39:43,236 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure bf7783e575074cc3a4ec1d197e979973 2024-12-12T20:39:43,248 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, ppid=104, state=SUCCESS; SnapshotRegionProcedure bf7783e575074cc3a4ec1d197e979973 in 244 msec 2024-12-12T20:39:43,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742104_1280 (size=86) 2024-12-12T20:39:43,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742104_1280 (size=86) 2024-12-12T20:39:43,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742104_1280 (size=86) 2024-12-12T20:39:43,267 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44. 
2024-12-12T20:39:43,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-12T20:39:43,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-12-12T20:39:43,275 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 8aad17ad3243c93d19b0a0f31dc6ae44 2024-12-12T20:39:43,275 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 8aad17ad3243c93d19b0a0f31dc6ae44 2024-12-12T20:39:43,288 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-12-12T20:39:43,288 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; SnapshotRegionProcedure 8aad17ad3243c93d19b0a0f31dc6ae44 in 281 msec 2024-12-12T20:39:43,288 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T20:39:43,293 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T20:39:43,300 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T20:39:43,300 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T20:39:43,302 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T20:39:43,309 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_1/usercache/jenkins/appcache/application_1734035798001_0005/container_1734035798001_0005_01_000002/launch_container.sh] 2024-12-12T20:39:43,309 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_1/usercache/jenkins/appcache/application_1734035798001_0005/container_1734035798001_0005_01_000002/container_tokens] 2024-12-12T20:39:43,309 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_1/usercache/jenkins/appcache/application_1734035798001_0005/container_1734035798001_0005_01_000002/sysfs] 2024-12-12T20:39:43,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742105_1281 (size=597) 2024-12-12T20:39:43,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742105_1281 (size=597) 2024-12-12T20:39:43,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742105_1281 (size=597) 2024-12-12T20:39:43,407 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T20:39:43,439 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T20:39:43,447 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T20:39:43,455 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T20:39:43,456 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-12T20:39:43,467 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion 
table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 568 msec 2024-12-12T20:39:43,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-12T20:39:43,521 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 104 completed 2024-12-12T20:39:43,577 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36817 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T20:39:43,585 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46859 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T20:39:43,605 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T20:39:43,605 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44. 2024-12-12T20:39:43,605 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:39:43,685 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-12T20:39:43,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734035983685 (current time:1734035983685). 
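[Editor's note, not part of the log] At this point the log shows three client-driven steps: the completed FLUSH-type snapshot emptySnaptb0-... (pid=104), rows written with the WAL disabled ("Data may be lost in the event of a crash"), and a second snapshot request for snaptb0-.... A minimal sketch of those calls, assuming an open Connection named conn and the SnapshotType overload of Admin.snapshot in HBase 2.x (the method and row values here are illustrative, not taken from the test source):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    static void snapshotAndWrite(Connection conn) throws Exception {
      TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
      try (Admin admin = conn.getAdmin(); Table table = conn.getTable(tn)) {
        // FLUSH-type snapshot of the still-empty table.
        admin.snapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion", tn, SnapshotType.FLUSH);

        // Write a row with the WAL skipped, which is what produces the
        // "writing data to region ... with WAL disabled" warning seen above.
        Put put = new Put(Bytes.toBytes("row-0"));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
        put.setDurability(Durability.SKIP_WAL);
        table.put(put);

        // Second snapshot, taken after data exists; the regions are flushed as part of it.
        admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion", tn, SnapshotType.FLUSH);
      }
    }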
2024-12-12T20:39:43,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T20:39:43,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-12T20:39:43,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-12T20:39:43,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x440f4adc to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@50b7401d 2024-12-12T20:39:43,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9324e2a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:39:43,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:39:43,756 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34422, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:39:43,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x440f4adc to 127.0.0.1:52655 2024-12-12T20:39:43,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:39:43,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6d65525a to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7b85043 2024-12-12T20:39:43,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ce01d80, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:39:43,847 DEBUG [hconnection-0x5afe0d8b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:39:43,853 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34424, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:39:43,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6d65525a to 127.0.0.1:52655 2024-12-12T20:39:43,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:39:43,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv 
[jenkins: RWXCA] 2024-12-12T20:39:43,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-12T20:39:43,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=107, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-12T20:39:43,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 107 2024-12-12T20:39:43,872 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T20:39:43,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-12T20:39:43,878 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T20:39:43,891 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T20:39:43,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742106_1282 (size=210) 2024-12-12T20:39:43,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742106_1282 (size=210) 2024-12-12T20:39:43,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742106_1282 (size=210) 2024-12-12T20:39:43,943 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T20:39:43,944 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 8aad17ad3243c93d19b0a0f31dc6ae44}, {pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure bf7783e575074cc3a4ec1d197e979973}] 2024-12-12T20:39:43,956 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=109, ppid=107, state=RUNNABLE; 
SnapshotRegionProcedure bf7783e575074cc3a4ec1d197e979973 2024-12-12T20:39:43,957 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 8aad17ad3243c93d19b0a0f31dc6ae44 2024-12-12T20:39:43,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-12T20:39:43,991 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-12-12T20:39:44,110 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:39:44,111 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46859 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=109 2024-12-12T20:39:44,111 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973. 2024-12-12T20:39:44,111 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing bf7783e575074cc3a4ec1d197e979973 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-12T20:39:44,122 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:39:44,125 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36817 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=108 2024-12-12T20:39:44,125 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44. 
2024-12-12T20:39:44,125 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(2837): Flushing 8aad17ad3243c93d19b0a0f31dc6ae44 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-12T20:39:44,160 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/bf7783e575074cc3a4ec1d197e979973/.tmp/cf/fcf8eb579e094850805e937ebb7277c3 is 71, key is 14b7603d2c044af4ae87403c7c3c69f5/cf:q/1734035983585/Put/seqid=0 2024-12-12T20:39:44,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-12T20:39:44,184 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/8aad17ad3243c93d19b0a0f31dc6ae44/.tmp/cf/a85c2f29d18145f6961cebd66d2d7918 is 71, key is 03e5cdc778745764b95518d296e5599b/cf:q/1734035983577/Put/seqid=0 2024-12-12T20:39:44,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742107_1283 (size=8326) 2024-12-12T20:39:44,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742107_1283 (size=8326) 2024-12-12T20:39:44,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742107_1283 (size=8326) 2024-12-12T20:39:44,211 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/bf7783e575074cc3a4ec1d197e979973/.tmp/cf/fcf8eb579e094850805e937ebb7277c3 2024-12-12T20:39:44,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/bf7783e575074cc3a4ec1d197e979973/.tmp/cf/fcf8eb579e094850805e937ebb7277c3 as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/bf7783e575074cc3a4ec1d197e979973/cf/fcf8eb579e094850805e937ebb7277c3 2024-12-12T20:39:44,268 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/bf7783e575074cc3a4ec1d197e979973/cf/fcf8eb579e094850805e937ebb7277c3, entries=47, sequenceid=6, filesize=8.1 K 2024-12-12T20:39:44,279 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(3040): 
Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for bf7783e575074cc3a4ec1d197e979973 in 168ms, sequenceid=6, compaction requested=false 2024-12-12T20:39:44,279 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for bf7783e575074cc3a4ec1d197e979973: 2024-12-12T20:39:44,280 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-12T20:39:44,280 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T20:39:44,280 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:39:44,280 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/bf7783e575074cc3a4ec1d197e979973/cf/fcf8eb579e094850805e937ebb7277c3] hfiles 2024-12-12T20:39:44,280 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/bf7783e575074cc3a4ec1d197e979973/cf/fcf8eb579e094850805e937ebb7277c3 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T20:39:44,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742108_1284 (size=5288) 2024-12-12T20:39:44,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742108_1284 (size=5288) 2024-12-12T20:39:44,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742108_1284 (size=5288) 2024-12-12T20:39:44,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742109_1285 (size=125) 2024-12-12T20:39:44,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742109_1285 (size=125) 2024-12-12T20:39:44,412 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973. 
2024-12-12T20:39:44,412 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-12T20:39:44,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742109_1285 (size=125) 2024-12-12T20:39:44,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-12T20:39:44,412 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region bf7783e575074cc3a4ec1d197e979973 2024-12-12T20:39:44,413 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure bf7783e575074cc3a4ec1d197e979973 2024-12-12T20:39:44,428 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=107, state=SUCCESS; SnapshotRegionProcedure bf7783e575074cc3a4ec1d197e979973 in 475 msec 2024-12-12T20:39:44,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-12T20:39:44,745 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/8aad17ad3243c93d19b0a0f31dc6ae44/.tmp/cf/a85c2f29d18145f6961cebd66d2d7918 2024-12-12T20:39:44,774 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0005_000001 (auth:SIMPLE) from 127.0.0.1:47006 2024-12-12T20:39:44,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/8aad17ad3243c93d19b0a0f31dc6ae44/.tmp/cf/a85c2f29d18145f6961cebd66d2d7918 as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/8aad17ad3243c93d19b0a0f31dc6ae44/cf/a85c2f29d18145f6961cebd66d2d7918 2024-12-12T20:39:44,824 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/8aad17ad3243c93d19b0a0f31dc6ae44/cf/a85c2f29d18145f6961cebd66d2d7918, entries=3, sequenceid=6, filesize=5.2 K 2024-12-12T20:39:44,829 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(3040): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 8aad17ad3243c93d19b0a0f31dc6ae44 in 704ms, sequenceid=6, compaction requested=false 2024-12-12T20:39:44,830 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(2538): Flush status journal for 8aad17ad3243c93d19b0a0f31dc6ae44: 
2024-12-12T20:39:44,830 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-12T20:39:44,830 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T20:39:44,830 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:39:44,830 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/8aad17ad3243c93d19b0a0f31dc6ae44/cf/a85c2f29d18145f6961cebd66d2d7918] hfiles 2024-12-12T20:39:44,830 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/8aad17ad3243c93d19b0a0f31dc6ae44/cf/a85c2f29d18145f6961cebd66d2d7918 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T20:39:44,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742110_1286 (size=125) 2024-12-12T20:39:44,886 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44. 
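[Editor's note, not part of the log] The flush entries above show each memstore being written to an HFile under the region's .tmp directory, committed into the cf/ store directory, and registered with its entry count, sequence id, and file size. Those flushes are triggered internally by the FLUSH-type snapshot; purely as a hedged illustration, the equivalent explicit request from a client, assuming an open Connection named conn, would be:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    // Illustrative only: force a memstore flush of the table from the client side.
    static void flushTable(Connection conn) throws Exception {
      try (Admin admin = conn.getAdmin()) {
        admin.flush(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
      }
    }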
2024-12-12T20:39:44,886 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=108 2024-12-12T20:39:44,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742110_1286 (size=125) 2024-12-12T20:39:44,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742110_1286 (size=125) 2024-12-12T20:39:44,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=108 2024-12-12T20:39:44,889 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 8aad17ad3243c93d19b0a0f31dc6ae44 2024-12-12T20:39:44,889 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 8aad17ad3243c93d19b0a0f31dc6ae44 2024-12-12T20:39:44,896 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=108, resume processing ppid=107 2024-12-12T20:39:44,896 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, ppid=107, state=SUCCESS; SnapshotRegionProcedure 8aad17ad3243c93d19b0a0f31dc6ae44 in 946 msec 2024-12-12T20:39:44,896 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T20:39:44,898 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T20:39:44,899 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T20:39:44,899 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T20:39:44,905 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T20:39:44,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-12T20:39:45,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742111_1287 (size=675) 2024-12-12T20:39:45,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:41061 is added to blk_1073742111_1287 (size=675) 2024-12-12T20:39:45,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742111_1287 (size=675) 2024-12-12T20:39:45,419 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T20:39:45,447 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T20:39:45,455 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T20:39:45,464 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T20:39:45,464 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 107 2024-12-12T20:39:45,474 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 1.5970 sec 2024-12-12T20:39:45,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-12T20:39:45,993 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 107 completed 2024-12-12T20:39:46,022 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-12T20:39:46,026 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34188, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-12T20:39:46,028 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36817 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-12T20:39:46,029 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-12T20:39:46,033 INFO [RS-EventLoopGroup-3-1 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39292, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-12T20:39:46,033 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42407 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-12T20:39:46,034 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-12T20:39:46,043 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34434, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-12T20:39:46,044 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46859 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-12T20:39:46,057 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T20:39:46,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:39:46,076 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T20:39:46,076 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:39:46,077 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 110 2024-12-12T20:39:46,083 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T20:39:46,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-12T20:39:46,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742112_1288 (size=399) 2024-12-12T20:39:46,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742112_1288 (size=399) 2024-12-12T20:39:46,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742112_1288 (size=399) 2024-12-12T20:39:46,161 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 
ddedacd6c3cc0f161f5931ef87a8a8b0, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986056.ddedacd6c3cc0f161f5931ef87a8a8b0.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:39:46,164 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 58607d6420347fd64375fce8461106b2, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1734035986056.58607d6420347fd64375fce8461106b2.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:39:46,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-12T20:39:46,209 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T20:39:46,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742114_1290 (size=85) 2024-12-12T20:39:46,274 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1734035986056.58607d6420347fd64375fce8461106b2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:39:46,274 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1681): Closing 58607d6420347fd64375fce8461106b2, disabling compactions & flushes 2024-12-12T20:39:46,274 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1734035986056.58607d6420347fd64375fce8461106b2. 2024-12-12T20:39:46,274 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1734035986056.58607d6420347fd64375fce8461106b2. 2024-12-12T20:39:46,274 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1734035986056.58607d6420347fd64375fce8461106b2. 
after waiting 0 ms 2024-12-12T20:39:46,274 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1734035986056.58607d6420347fd64375fce8461106b2. 2024-12-12T20:39:46,274 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1734035986056.58607d6420347fd64375fce8461106b2. 2024-12-12T20:39:46,274 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1635): Region close journal for 58607d6420347fd64375fce8461106b2: 2024-12-12T20:39:46,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742114_1290 (size=85) 2024-12-12T20:39:46,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742114_1290 (size=85) 2024-12-12T20:39:46,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742113_1289 (size=85) 2024-12-12T20:39:46,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742113_1289 (size=85) 2024-12-12T20:39:46,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742113_1289 (size=85) 2024-12-12T20:39:46,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-12T20:39:46,684 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986056.ddedacd6c3cc0f161f5931ef87a8a8b0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:39:46,685 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1681): Closing ddedacd6c3cc0f161f5931ef87a8a8b0, disabling compactions & flushes 2024-12-12T20:39:46,685 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986056.ddedacd6c3cc0f161f5931ef87a8a8b0. 2024-12-12T20:39:46,685 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986056.ddedacd6c3cc0f161f5931ef87a8a8b0. 2024-12-12T20:39:46,685 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986056.ddedacd6c3cc0f161f5931ef87a8a8b0. after waiting 0 ms 2024-12-12T20:39:46,685 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986056.ddedacd6c3cc0f161f5931ef87a8a8b0. 
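
[Annotation] The entries above (pid=110) trace a CreateTableProcedure for 'testtb-testExportFileSystemStateWithMergeRegion-1' with a single 'cf' family (VERSIONS => '1') and two initial regions split at rowkey '2' (keys ''..'2' and '2'..''). As an orientation aid only, and not the test's actual source code, the following minimal sketch shows how such a table could be created with the standard HBase 2.x client API; connection setup and class/variable names here are illustrative assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMergeTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // picks up hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
          // One 'cf' family keeping a single version, matching the descriptor logged above.
          TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(tn)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .build());
          // A single split key '2' yields the two regions seen in the log: ('', '2') and ('2', '').
          byte[][] splitKeys = new byte[][] { Bytes.toBytes("2") };
          admin.createTable(tdb.build(), splitKeys);
        }
      }
    }

The blocking createTable call corresponds to the client-side "Checking to see if procedure is done pid=110" polling visible in the log.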
2024-12-12T20:39:46,685 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986056.ddedacd6c3cc0f161f5931ef87a8a8b0. 2024-12-12T20:39:46,685 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1635): Region close journal for ddedacd6c3cc0f161f5931ef87a8a8b0: 2024-12-12T20:39:46,686 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T20:39:46,686 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1734035986056.58607d6420347fd64375fce8461106b2.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1734035986686"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734035986686"}]},"ts":"1734035986686"} 2024-12-12T20:39:46,687 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986056.ddedacd6c3cc0f161f5931ef87a8a8b0.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1734035986686"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734035986686"}]},"ts":"1734035986686"} 2024-12-12T20:39:46,692 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-12T20:39:46,695 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T20:39:46,696 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035986695"}]},"ts":"1734035986695"} 2024-12-12T20:39:46,698 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-12-12T20:39:46,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-12T20:39:46,722 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {0a737713f6e3=0} racks are {/default-rack=0} 2024-12-12T20:39:46,723 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-12T20:39:46,723 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-12T20:39:46,723 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-12T20:39:46,723 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-12T20:39:46,723 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-12T20:39:46,723 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-12T20:39:46,723 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T20:39:46,724 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; 
TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ddedacd6c3cc0f161f5931ef87a8a8b0, ASSIGN}, {pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=58607d6420347fd64375fce8461106b2, ASSIGN}] 2024-12-12T20:39:46,725 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=58607d6420347fd64375fce8461106b2, ASSIGN 2024-12-12T20:39:46,725 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ddedacd6c3cc0f161f5931ef87a8a8b0, ASSIGN 2024-12-12T20:39:46,726 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ddedacd6c3cc0f161f5931ef87a8a8b0, ASSIGN; state=OFFLINE, location=0a737713f6e3,46859,1734035782031; forceNewPlan=false, retain=false 2024-12-12T20:39:46,726 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=58607d6420347fd64375fce8461106b2, ASSIGN; state=OFFLINE, location=0a737713f6e3,42407,1734035781742; forceNewPlan=false, retain=false 2024-12-12T20:39:46,876 INFO [0a737713f6e3:43067 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-12T20:39:46,876 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=58607d6420347fd64375fce8461106b2, regionState=OPENING, regionLocation=0a737713f6e3,42407,1734035781742 2024-12-12T20:39:46,876 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=111 updating hbase:meta row=ddedacd6c3cc0f161f5931ef87a8a8b0, regionState=OPENING, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:39:46,878 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; OpenRegionProcedure 58607d6420347fd64375fce8461106b2, server=0a737713f6e3,42407,1734035781742}] 2024-12-12T20:39:46,878 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=114, ppid=111, state=RUNNABLE; OpenRegionProcedure ddedacd6c3cc0f161f5931ef87a8a8b0, server=0a737713f6e3,46859,1734035782031}] 2024-12-12T20:39:47,029 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:39:47,030 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:39:47,036 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986056.ddedacd6c3cc0f161f5931ef87a8a8b0. 
2024-12-12T20:39:47,037 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1734035986056.58607d6420347fd64375fce8461106b2. 2024-12-12T20:39:47,037 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7285): Opening region: {ENCODED => ddedacd6c3cc0f161f5931ef87a8a8b0, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986056.ddedacd6c3cc0f161f5931ef87a8a8b0.', STARTKEY => '', ENDKEY => '2'} 2024-12-12T20:39:47,037 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7285): Opening region: {ENCODED => 58607d6420347fd64375fce8461106b2, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1734035986056.58607d6420347fd64375fce8461106b2.', STARTKEY => '2', ENDKEY => ''} 2024-12-12T20:39:47,037 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986056.ddedacd6c3cc0f161f5931ef87a8a8b0. service=AccessControlService 2024-12-12T20:39:47,038 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1734035986056.58607d6420347fd64375fce8461106b2. service=AccessControlService 2024-12-12T20:39:47,038 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T20:39:47,038 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
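
[Annotation] The open-region entries above show each region registering the AccessControlService coprocessor service because the test cluster loads org.apache.hadoop.hbase.security.access.AccessController as a system coprocessor. A hedged sketch of how such a configuration is typically expressed programmatically (the exact mechanism used by this test harness is not shown in the log; the keys below are the standard HBase configuration properties):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class EnableAccessController {
      public static Configuration withAccessController() {
        Configuration conf = HBaseConfiguration.create();
        // Turn on authorization and register the AccessController coprocessor on master and regions,
        // which is what causes regions to expose the AccessControlService seen above.
        conf.setBoolean("hbase.security.authorization", true);
        conf.set("hbase.coprocessor.master.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.region.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        return conf;
      }
    }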
2024-12-12T20:39:47,038 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 ddedacd6c3cc0f161f5931ef87a8a8b0 2024-12-12T20:39:47,038 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986056.ddedacd6c3cc0f161f5931ef87a8a8b0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:39:47,038 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 58607d6420347fd64375fce8461106b2 2024-12-12T20:39:47,038 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7327): checking encryption for ddedacd6c3cc0f161f5931ef87a8a8b0 2024-12-12T20:39:47,038 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1734035986056.58607d6420347fd64375fce8461106b2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:39:47,038 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7330): checking classloading for ddedacd6c3cc0f161f5931ef87a8a8b0 2024-12-12T20:39:47,038 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7327): checking encryption for 58607d6420347fd64375fce8461106b2 2024-12-12T20:39:47,039 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7330): checking classloading for 58607d6420347fd64375fce8461106b2 2024-12-12T20:39:47,040 INFO [StoreOpener-58607d6420347fd64375fce8461106b2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 58607d6420347fd64375fce8461106b2 2024-12-12T20:39:47,040 INFO [StoreOpener-ddedacd6c3cc0f161f5931ef87a8a8b0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ddedacd6c3cc0f161f5931ef87a8a8b0 2024-12-12T20:39:47,041 INFO [StoreOpener-58607d6420347fd64375fce8461106b2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 58607d6420347fd64375fce8461106b2 columnFamilyName cf 2024-12-12T20:39:47,041 DEBUG [StoreOpener-58607d6420347fd64375fce8461106b2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:39:47,041 INFO [StoreOpener-ddedacd6c3cc0f161f5931ef87a8a8b0-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ddedacd6c3cc0f161f5931ef87a8a8b0 columnFamilyName cf 2024-12-12T20:39:47,041 DEBUG [StoreOpener-ddedacd6c3cc0f161f5931ef87a8a8b0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:39:47,042 INFO [StoreOpener-ddedacd6c3cc0f161f5931ef87a8a8b0-1 {}] regionserver.HStore(327): Store=ddedacd6c3cc0f161f5931ef87a8a8b0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:39:47,042 INFO [StoreOpener-58607d6420347fd64375fce8461106b2-1 {}] regionserver.HStore(327): Store=58607d6420347fd64375fce8461106b2/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:39:47,043 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/58607d6420347fd64375fce8461106b2 2024-12-12T20:39:47,043 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddedacd6c3cc0f161f5931ef87a8a8b0 2024-12-12T20:39:47,043 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddedacd6c3cc0f161f5931ef87a8a8b0 2024-12-12T20:39:47,043 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/58607d6420347fd64375fce8461106b2 2024-12-12T20:39:47,045 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, 
pid=113}] regionserver.HRegion(1085): writing seq id for 58607d6420347fd64375fce8461106b2 2024-12-12T20:39:47,045 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1085): writing seq id for ddedacd6c3cc0f161f5931ef87a8a8b0 2024-12-12T20:39:47,046 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddedacd6c3cc0f161f5931ef87a8a8b0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:39:47,047 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/58607d6420347fd64375fce8461106b2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:39:47,047 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1102): Opened ddedacd6c3cc0f161f5931ef87a8a8b0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62307531, jitterRate=-0.07154543697834015}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:39:47,047 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1102): Opened 58607d6420347fd64375fce8461106b2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73368508, jitterRate=0.09327596426010132}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:39:47,047 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1001): Region open journal for ddedacd6c3cc0f161f5931ef87a8a8b0: 2024-12-12T20:39:47,047 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1001): Region open journal for 58607d6420347fd64375fce8461106b2: 2024-12-12T20:39:47,048 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1734035986056.58607d6420347fd64375fce8461106b2., pid=113, masterSystemTime=1734035987029 2024-12-12T20:39:47,048 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986056.ddedacd6c3cc0f161f5931ef87a8a8b0., pid=114, masterSystemTime=1734035987030 2024-12-12T20:39:47,049 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1734035986056.58607d6420347fd64375fce8461106b2. 2024-12-12T20:39:47,049 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1734035986056.58607d6420347fd64375fce8461106b2. 
2024-12-12T20:39:47,050 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=58607d6420347fd64375fce8461106b2, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,42407,1734035781742 2024-12-12T20:39:47,050 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986056.ddedacd6c3cc0f161f5931ef87a8a8b0. 2024-12-12T20:39:47,050 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986056.ddedacd6c3cc0f161f5931ef87a8a8b0. 2024-12-12T20:39:47,051 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=111 updating hbase:meta row=ddedacd6c3cc0f161f5931ef87a8a8b0, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:39:47,052 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-12T20:39:47,053 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=114, resume processing ppid=111 2024-12-12T20:39:47,053 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; OpenRegionProcedure 58607d6420347fd64375fce8461106b2, server=0a737713f6e3,42407,1734035781742 in 174 msec 2024-12-12T20:39:47,053 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, ppid=110, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=58607d6420347fd64375fce8461106b2, ASSIGN in 328 msec 2024-12-12T20:39:47,053 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, ppid=111, state=SUCCESS; OpenRegionProcedure ddedacd6c3cc0f161f5931ef87a8a8b0, server=0a737713f6e3,46859,1734035782031 in 174 msec 2024-12-12T20:39:47,054 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-12-12T20:39:47,054 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ddedacd6c3cc0f161f5931ef87a8a8b0, ASSIGN in 329 msec 2024-12-12T20:39:47,054 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T20:39:47,054 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734035987054"}]},"ts":"1734035987054"} 2024-12-12T20:39:47,055 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-12-12T20:39:47,192 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T20:39:47,192 DEBUG [PEWorker-5 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: 
RWXCA 2024-12-12T20:39:47,194 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46859 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-12T20:39:47,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-12T20:39:47,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:39:47,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:39:47,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:39:47,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:39:47,515 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T20:39:47,515 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T20:39:47,516 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-12T20:39:47,516 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-12T20:39:47,516 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T20:39:47,516 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T20:39:47,517 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; CreateTableProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion-1 in 1.4570 sec 2024-12-12T20:39:47,517 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-12T20:39:47,517 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-12T20:39:48,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-12T20:39:48,203 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 110 completed 2024-12-12T20:39:48,220 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39296, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:39:48,277 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.HMaster$2(2219): Client=jenkins//172.17.0.2 merge regions [ddedacd6c3cc0f161f5931ef87a8a8b0, 58607d6420347fd64375fce8461106b2] 2024-12-12T20:39:48,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[ddedacd6c3cc0f161f5931ef87a8a8b0, 58607d6420347fd64375fce8461106b2], force=true 2024-12-12T20:39:48,292 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[ddedacd6c3cc0f161f5931ef87a8a8b0, 58607d6420347fd64375fce8461106b2], force=true 2024-12-12T20:39:48,292 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[ddedacd6c3cc0f161f5931ef87a8a8b0, 58607d6420347fd64375fce8461106b2], force=true 2024-12-12T20:39:48,292 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[ddedacd6c3cc0f161f5931ef87a8a8b0, 58607d6420347fd64375fce8461106b2], force=true 2024-12-12T20:39:48,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-12T20:39:48,339 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ddedacd6c3cc0f161f5931ef87a8a8b0, UNASSIGN}, {pid=117, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=58607d6420347fd64375fce8461106b2, UNASSIGN}] 
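
[Annotation] At this point the client asks the master to merge the two regions (ddedacd6c3cc0f161f5931ef87a8a8b0 and 58607d6420347fd64375fce8461106b2), and the master stores MergeTableRegionsProcedure pid=115 with force=true, whose first subprocedures unassign both parents. A minimal sketch of the kind of client call that produces this, assuming the HBase 2.x Admin API (region lookup and timeout handling here are illustrative):

    import java.util.List;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class MergeAdjacentRegions {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // The two regions created earlier, identified by their encoded names.
          List<RegionInfo> regions = admin.getRegions(tn);
          byte[][] toMerge = new byte[][] {
              regions.get(0).getEncodedNameAsBytes(),
              regions.get(1).getEncodedNameAsBytes()
          };
          // force=true mirrors the MergeTableRegionsProcedure parameters logged above.
          admin.mergeRegionsAsync(toMerge, true).get(5, TimeUnit.MINUTES);
        }
      }
    }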
2024-12-12T20:39:48,342 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=117, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=58607d6420347fd64375fce8461106b2, UNASSIGN 2024-12-12T20:39:48,342 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ddedacd6c3cc0f161f5931ef87a8a8b0, UNASSIGN 2024-12-12T20:39:48,344 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=117 updating hbase:meta row=58607d6420347fd64375fce8461106b2, regionState=CLOSING, regionLocation=0a737713f6e3,42407,1734035781742 2024-12-12T20:39:48,344 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=ddedacd6c3cc0f161f5931ef87a8a8b0, regionState=CLOSING, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:39:48,349 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-12T20:39:48,349 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=118, ppid=116, state=RUNNABLE; CloseRegionProcedure ddedacd6c3cc0f161f5931ef87a8a8b0, server=0a737713f6e3,46859,1734035782031}] 2024-12-12T20:39:48,350 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-12T20:39:48,352 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=117, state=RUNNABLE; CloseRegionProcedure 58607d6420347fd64375fce8461106b2, server=0a737713f6e3,42407,1734035781742}] 2024-12-12T20:39:48,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-12T20:39:48,509 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:39:48,510 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(124): Close ddedacd6c3cc0f161f5931ef87a8a8b0 2024-12-12T20:39:48,510 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true 2024-12-12T20:39:48,510 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1681): Closing ddedacd6c3cc0f161f5931ef87a8a8b0, disabling compactions & flushes 2024-12-12T20:39:48,510 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986056.ddedacd6c3cc0f161f5931ef87a8a8b0. 2024-12-12T20:39:48,511 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986056.ddedacd6c3cc0f161f5931ef87a8a8b0. 
2024-12-12T20:39:48,511 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986056.ddedacd6c3cc0f161f5931ef87a8a8b0. after waiting 0 ms 2024-12-12T20:39:48,511 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986056.ddedacd6c3cc0f161f5931ef87a8a8b0. 2024-12-12T20:39:48,511 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(2837): Flushing ddedacd6c3cc0f161f5931ef87a8a8b0 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-12T20:39:48,525 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:39:48,526 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(124): Close 58607d6420347fd64375fce8461106b2 2024-12-12T20:39:48,526 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true 2024-12-12T20:39:48,526 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1681): Closing 58607d6420347fd64375fce8461106b2, disabling compactions & flushes 2024-12-12T20:39:48,527 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1734035986056.58607d6420347fd64375fce8461106b2. 2024-12-12T20:39:48,527 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1734035986056.58607d6420347fd64375fce8461106b2. 2024-12-12T20:39:48,527 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1734035986056.58607d6420347fd64375fce8461106b2. after waiting 0 ms 2024-12-12T20:39:48,527 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1734035986056.58607d6420347fd64375fce8461106b2. 
2024-12-12T20:39:48,527 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(2837): Flushing 58607d6420347fd64375fce8461106b2 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-12T20:39:48,534 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddedacd6c3cc0f161f5931ef87a8a8b0/.tmp/cf/e8876c45f822475ea82e7af8e9a51838 is 28, key is 1/cf:/1734035988209/Put/seqid=0 2024-12-12T20:39:48,552 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/58607d6420347fd64375fce8461106b2/.tmp/cf/e2622c880a6946949e860e3032a5c932 is 28, key is 2/cf:/1734035988223/Put/seqid=0 2024-12-12T20:39:48,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742115_1291 (size=4945) 2024-12-12T20:39:48,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742115_1291 (size=4945) 2024-12-12T20:39:48,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742115_1291 (size=4945) 2024-12-12T20:39:48,585 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddedacd6c3cc0f161f5931ef87a8a8b0/.tmp/cf/e8876c45f822475ea82e7af8e9a51838 2024-12-12T20:39:48,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742116_1292 (size=4945) 2024-12-12T20:39:48,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742116_1292 (size=4945) 2024-12-12T20:39:48,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742116_1292 (size=4945) 2024-12-12T20:39:48,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-12T20:39:48,619 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddedacd6c3cc0f161f5931ef87a8a8b0/.tmp/cf/e8876c45f822475ea82e7af8e9a51838 as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddedacd6c3cc0f161f5931ef87a8a8b0/cf/e8876c45f822475ea82e7af8e9a51838 2024-12-12T20:39:48,629 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HStore$StoreFlusherImpl(1989): 
Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddedacd6c3cc0f161f5931ef87a8a8b0/cf/e8876c45f822475ea82e7af8e9a51838, entries=1, sequenceid=5, filesize=4.8 K 2024-12-12T20:39:48,636 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(3040): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for ddedacd6c3cc0f161f5931ef87a8a8b0 in 124ms, sequenceid=5, compaction requested=false 2024-12-12T20:39:48,636 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-12-12T20:39:48,675 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddedacd6c3cc0f161f5931ef87a8a8b0/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-12T20:39:48,678 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:39:48,678 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986056.ddedacd6c3cc0f161f5931ef87a8a8b0. 2024-12-12T20:39:48,679 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1635): Region close journal for ddedacd6c3cc0f161f5931ef87a8a8b0: 2024-12-12T20:39:48,684 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(170): Closed ddedacd6c3cc0f161f5931ef87a8a8b0 2024-12-12T20:39:48,686 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=ddedacd6c3cc0f161f5931ef87a8a8b0, regionState=CLOSED 2024-12-12T20:39:48,694 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=118, resume processing ppid=116 2024-12-12T20:39:48,694 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, ppid=116, state=SUCCESS; CloseRegionProcedure ddedacd6c3cc0f161f5931ef87a8a8b0, server=0a737713f6e3,46859,1734035782031 in 339 msec 2024-12-12T20:39:48,698 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ddedacd6c3cc0f161f5931ef87a8a8b0, UNASSIGN in 356 msec 2024-12-12T20:39:48,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-12T20:39:49,002 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/58607d6420347fd64375fce8461106b2/.tmp/cf/e2622c880a6946949e860e3032a5c932 2024-12-12T20:39:49,017 DEBUG 
[RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/58607d6420347fd64375fce8461106b2/.tmp/cf/e2622c880a6946949e860e3032a5c932 as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/58607d6420347fd64375fce8461106b2/cf/e2622c880a6946949e860e3032a5c932 2024-12-12T20:39:49,029 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/58607d6420347fd64375fce8461106b2/cf/e2622c880a6946949e860e3032a5c932, entries=1, sequenceid=5, filesize=4.8 K 2024-12-12T20:39:49,035 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 58607d6420347fd64375fce8461106b2 in 508ms, sequenceid=5, compaction requested=false 2024-12-12T20:39:49,072 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/58607d6420347fd64375fce8461106b2/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-12T20:39:49,074 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:39:49,074 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1734035986056.58607d6420347fd64375fce8461106b2. 
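
[Annotation] The flushes above persist one small Put per parent region before it closes for the merge: the cells "1/cf:" and "2/cf:" (24 B each) land in the ('', '2') and ('2', '') regions and are written out as 4.8 K HFiles. A hedged sketch of such writes with the standard client API; the values are illustrative placeholders, since the log only shows rowkeys, family, and an empty qualifier:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteOneRowPerRegion {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(tn)) {
          // Rowkey '1' falls in the ('', '2') region and rowkey '2' in the ('2', '') region,
          // matching the "key is 1/cf:" and "key is 2/cf:" cells flushed in the log above.
          // The empty qualifier matches the log; the values are assumptions for illustration.
          table.put(new Put(Bytes.toBytes("1"))
              .addColumn(Bytes.toBytes("cf"), new byte[0], Bytes.toBytes("value-1")));
          table.put(new Put(Bytes.toBytes("2"))
              .addColumn(Bytes.toBytes("cf"), new byte[0], Bytes.toBytes("value-2")));
        }
      }
    }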
2024-12-12T20:39:49,075 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1635): Region close journal for 58607d6420347fd64375fce8461106b2: 2024-12-12T20:39:49,086 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=117 updating hbase:meta row=58607d6420347fd64375fce8461106b2, regionState=CLOSED 2024-12-12T20:39:49,087 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(170): Closed 58607d6420347fd64375fce8461106b2 2024-12-12T20:39:49,105 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=117 2024-12-12T20:39:49,106 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=117, state=SUCCESS; CloseRegionProcedure 58607d6420347fd64375fce8461106b2, server=0a737713f6e3,42407,1734035781742 in 749 msec 2024-12-12T20:39:49,111 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=115 2024-12-12T20:39:49,111 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=58607d6420347fd64375fce8461106b2, UNASSIGN in 767 msec 2024-12-12T20:39:49,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742117_1293 (size=84) 2024-12-12T20:39:49,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742117_1293 (size=84) 2024-12-12T20:39:49,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742117_1293 (size=84) 2024-12-12T20:39:49,186 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:39:49,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742118_1294 (size=20) 2024-12-12T20:39:49,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742118_1294 (size=20) 2024-12-12T20:39:49,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742118_1294 (size=20) 2024-12-12T20:39:49,300 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-12T20:39:49,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-12T20:39:49,638 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:39:49,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742119_1295 (size=21) 2024-12-12T20:39:49,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742119_1295 (size=21) 2024-12-12T20:39:49,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742119_1295 (size=21) 2024-12-12T20:39:49,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742120_1296 (size=84) 2024-12-12T20:39:49,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742120_1296 (size=84) 2024-12-12T20:39:49,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742120_1296 (size=84) 2024-12-12T20:39:49,750 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:39:49,800 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/04bdedca79d2c7a6c82ea3b440f67379/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-12-12T20:39:49,809 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986056.ddedacd6c3cc0f161f5931ef87a8a8b0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-12T20:39:49,810 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1734035986056.58607d6420347fd64375fce8461106b2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-12T20:39:49,810 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986057.04bdedca79d2c7a6c82ea3b440f67379.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-12T20:39:49,881 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=04bdedca79d2c7a6c82ea3b440f67379, ASSIGN}] 2024-12-12T20:39:49,884 INFO [PEWorker-3 {}] 
procedure.MasterProcedureScheduler(786): Took xlock for pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=04bdedca79d2c7a6c82ea3b440f67379, ASSIGN 2024-12-12T20:39:49,888 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=04bdedca79d2c7a6c82ea3b440f67379, ASSIGN; state=MERGED, location=0a737713f6e3,46859,1734035782031; forceNewPlan=false, retain=false 2024-12-12T20:39:49,899 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_1/usercache/jenkins/appcache/application_1734035798001_0005/container_1734035798001_0005_01_000001/launch_container.sh] 2024-12-12T20:39:49,899 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_1/usercache/jenkins/appcache/application_1734035798001_0005/container_1734035798001_0005_01_000001/container_tokens] 2024-12-12T20:39:49,899 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_1/usercache/jenkins/appcache/application_1734035798001_0005/container_1734035798001_0005_01_000001/sysfs] 2024-12-12T20:39:50,039 INFO [0a737713f6e3:43067 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-12T20:39:50,039 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=04bdedca79d2c7a6c82ea3b440f67379, regionState=OPENING, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:39:50,044 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; OpenRegionProcedure 04bdedca79d2c7a6c82ea3b440f67379, server=0a737713f6e3,46859,1734035782031}] 2024-12-12T20:39:50,199 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:39:50,209 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986057.04bdedca79d2c7a6c82ea3b440f67379. 
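
[Annotation] The merged region 04bdedca79d2c7a6c82ea3b440f67379, spanning ('', ''), is now being assigned and opened on 0a737713f6e3,46859. A small hedged sketch of how a client could verify that the merge left the table with a single all-covering region (the check itself is an assumption, not part of the logged test):

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class VerifySingleRegionAfterMerge {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          List<RegionInfo> regions = admin.getRegions(tn);
          // After the merge completes there should be exactly one region with empty start and end keys.
          if (regions.size() != 1
              || regions.get(0).getStartKey().length != 0
              || regions.get(0).getEndKey().length != 0) {
            throw new IllegalStateException("merge not complete yet: " + regions);
          }
        }
      }
    }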
2024-12-12T20:39:50,210 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7285): Opening region: {ENCODED => 04bdedca79d2c7a6c82ea3b440f67379, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986057.04bdedca79d2c7a6c82ea3b440f67379.', STARTKEY => '', ENDKEY => ''} 2024-12-12T20:39:50,210 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986057.04bdedca79d2c7a6c82ea3b440f67379. service=AccessControlService 2024-12-12T20:39:50,211 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T20:39:50,211 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 04bdedca79d2c7a6c82ea3b440f67379 2024-12-12T20:39:50,211 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986057.04bdedca79d2c7a6c82ea3b440f67379.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:39:50,211 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7327): checking encryption for 04bdedca79d2c7a6c82ea3b440f67379 2024-12-12T20:39:50,211 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7330): checking classloading for 04bdedca79d2c7a6c82ea3b440f67379 2024-12-12T20:39:50,222 INFO [StoreOpener-04bdedca79d2c7a6c82ea3b440f67379-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 04bdedca79d2c7a6c82ea3b440f67379 2024-12-12T20:39:50,225 INFO [StoreOpener-04bdedca79d2c7a6c82ea3b440f67379-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 04bdedca79d2c7a6c82ea3b440f67379 columnFamilyName cf 2024-12-12T20:39:50,225 DEBUG [StoreOpener-04bdedca79d2c7a6c82ea3b440f67379-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:39:50,272 DEBUG [StoreOpener-04bdedca79d2c7a6c82ea3b440f67379-1 {}] regionserver.StoreEngine(277): loaded 
hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/04bdedca79d2c7a6c82ea3b440f67379/cf/e2622c880a6946949e860e3032a5c932.58607d6420347fd64375fce8461106b2->hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/58607d6420347fd64375fce8461106b2/cf/e2622c880a6946949e860e3032a5c932-top 2024-12-12T20:39:50,302 DEBUG [StoreOpener-04bdedca79d2c7a6c82ea3b440f67379-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/04bdedca79d2c7a6c82ea3b440f67379/cf/e8876c45f822475ea82e7af8e9a51838.ddedacd6c3cc0f161f5931ef87a8a8b0->hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddedacd6c3cc0f161f5931ef87a8a8b0/cf/e8876c45f822475ea82e7af8e9a51838-top 2024-12-12T20:39:50,302 INFO [StoreOpener-04bdedca79d2c7a6c82ea3b440f67379-1 {}] regionserver.HStore(327): Store=04bdedca79d2c7a6c82ea3b440f67379/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:39:50,304 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/04bdedca79d2c7a6c82ea3b440f67379 2024-12-12T20:39:50,309 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/04bdedca79d2c7a6c82ea3b440f67379 2024-12-12T20:39:50,318 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1085): writing seq id for 04bdedca79d2c7a6c82ea3b440f67379 2024-12-12T20:39:50,320 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1102): Opened 04bdedca79d2c7a6c82ea3b440f67379; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59833053, jitterRate=-0.10841803252696991}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:39:50,321 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1001): Region open journal for 04bdedca79d2c7a6c82ea3b440f67379: 2024-12-12T20:39:50,331 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986057.04bdedca79d2c7a6c82ea3b440f67379., pid=121, masterSystemTime=1734035990199 2024-12-12T20:39:50,332 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986057.04bdedca79d2c7a6c82ea3b440f67379.,because compaction is disabled. 
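
The "Ignoring compaction request ... because compaction is disabled" entry above suggests the region servers in this run were started with compactions switched off. A minimal sketch of that configuration, assuming the 2.x region-server switch key "hbase.regionserver.compaction.enabled" is what the test harness sets (class name is illustrative, not from this run):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class NoCompactionConf {
      public static void main(String[] args) {
        // Configuration handed to the (mini-)cluster's region servers; with this
        // switch off, post-open compaction requests are ignored as logged above.
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.regionserver.compaction.enabled", false);
        System.out.println("compactions enabled: "
            + conf.getBoolean("hbase.regionserver.compaction.enabled", true));
      }
    }
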
2024-12-12T20:39:50,345 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986057.04bdedca79d2c7a6c82ea3b440f67379. 2024-12-12T20:39:50,345 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986057.04bdedca79d2c7a6c82ea3b440f67379. 2024-12-12T20:39:50,348 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=04bdedca79d2c7a6c82ea3b440f67379, regionState=OPEN, openSeqNum=9, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:39:50,361 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-12-12T20:39:50,361 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; OpenRegionProcedure 04bdedca79d2c7a6c82ea3b440f67379, server=0a737713f6e3,46859,1734035782031 in 308 msec 2024-12-12T20:39:50,367 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=120, resume processing ppid=115 2024-12-12T20:39:50,368 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=04bdedca79d2c7a6c82ea3b440f67379, ASSIGN in 480 msec 2024-12-12T20:39:50,385 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, state=SUCCESS; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[ddedacd6c3cc0f161f5931ef87a8a8b0, 58607d6420347fd64375fce8461106b2], force=true in 2.0880 sec 2024-12-12T20:39:50,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-12T20:39:50,416 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 115 completed 2024-12-12T20:39:50,417 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-12T20:39:50,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734035990417 (current time:1734035990417). 
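
The sequence above (MERGE_REGIONS procId 115 finishing, then the master receiving a FLUSH-type snapshot request) corresponds to client-side Admin calls roughly like the following sketch; the table and snapshot names are taken from the log, but the surrounding code is illustrative, not the test's actual source:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class MergeThenSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table =
            TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Merge the table's two regions (force=true, as in the logged procedure).
          List<RegionInfo> regions = admin.getRegions(table);
          admin.mergeRegionsAsync(new byte[][] {
              regions.get(0).getEncodedNameAsBytes(),
              regions.get(1).getEncodedNameAsBytes() }, true).get();
          // Take the FLUSH snapshot that the master logs as a snapshot request.
          admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
              table, SnapshotType.FLUSH);
        }
      }
    }
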
2024-12-12T20:39:50,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T20:39:50,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-12-12T20:39:50,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-12T20:39:50,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6b7ade27 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6993606c 2024-12-12T20:39:50,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f0d1296, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:39:50,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:39:50,490 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43686, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:39:50,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6b7ade27 to 127.0.0.1:52655 2024-12-12T20:39:50,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:39:50,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x75b3ec00 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@75162da 2024-12-12T20:39:50,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20ef4904, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:39:50,567 DEBUG [hconnection-0x44c80e48-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:39:50,569 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43688, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:39:50,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x75b3ec00 to 127.0.0.1:52655 2024-12-12T20:39:50,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:39:50,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv 
[jenkins: RWXCA] 2024-12-12T20:39:50,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-12T20:39:50,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-12T20:39:50,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-12T20:39:50,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-12T20:39:50,604 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T20:39:50,613 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T20:39:50,627 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T20:39:50,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742121_1297 (size=216) 2024-12-12T20:39:50,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742121_1297 (size=216) 2024-12-12T20:39:50,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742121_1297 (size=216) 2024-12-12T20:39:50,681 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T20:39:50,681 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure 04bdedca79d2c7a6c82ea3b440f67379}] 2024-12-12T20:39:50,688 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure 04bdedca79d2c7a6c82ea3b440f67379 2024-12-12T20:39:50,715 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-12T20:39:50,844 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:39:50,848 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46859 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123 2024-12-12T20:39:50,848 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986057.04bdedca79d2c7a6c82ea3b440f67379. 2024-12-12T20:39:50,848 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for 04bdedca79d2c7a6c82ea3b440f67379: 2024-12-12T20:39:50,848 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986057.04bdedca79d2c7a6c82ea3b440f67379. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-12-12T20:39:50,849 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986057.04bdedca79d2c7a6c82ea3b440f67379.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:39:50,849 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:39:50,849 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/04bdedca79d2c7a6c82ea3b440f67379/cf/e2622c880a6946949e860e3032a5c932.58607d6420347fd64375fce8461106b2->hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/58607d6420347fd64375fce8461106b2/cf/e2622c880a6946949e860e3032a5c932-top, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/04bdedca79d2c7a6c82ea3b440f67379/cf/e8876c45f822475ea82e7af8e9a51838.ddedacd6c3cc0f161f5931ef87a8a8b0->hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddedacd6c3cc0f161f5931ef87a8a8b0/cf/e8876c45f822475ea82e7af8e9a51838-top] hfiles 2024-12-12T20:39:50,849 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/04bdedca79d2c7a6c82ea3b440f67379/cf/e2622c880a6946949e860e3032a5c932.58607d6420347fd64375fce8461106b2 for 
snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:39:50,853 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/04bdedca79d2c7a6c82ea3b440f67379/cf/e8876c45f822475ea82e7af8e9a51838.ddedacd6c3cc0f161f5931ef87a8a8b0 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:39:50,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-12T20:39:50,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742122_1298 (size=269) 2024-12-12T20:39:50,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742122_1298 (size=269) 2024-12-12T20:39:50,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742122_1298 (size=269) 2024-12-12T20:39:50,972 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986057.04bdedca79d2c7a6c82ea3b440f67379. 2024-12-12T20:39:50,972 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-12T20:39:50,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-12-12T20:39:50,975 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region 04bdedca79d2c7a6c82ea3b440f67379 2024-12-12T20:39:50,976 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure 04bdedca79d2c7a6c82ea3b440f67379 2024-12-12T20:39:50,995 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-12T20:39:50,995 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; SnapshotRegionProcedure 04bdedca79d2c7a6c82ea3b440f67379 in 301 msec 2024-12-12T20:39:50,995 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T20:39:50,999 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T20:39:51,001 INFO [PEWorker-5 {}] 
procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T20:39:51,002 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:39:51,005 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:39:51,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742123_1299 (size=670) 2024-12-12T20:39:51,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742123_1299 (size=670) 2024-12-12T20:39:51,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742123_1299 (size=670) 2024-12-12T20:39:51,099 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T20:39:51,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T20:39:51,126 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-12-12T20:39:51,127 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:39:51,127 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-12-12T20:39:51,139 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T20:39:51,143 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 
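
Once the manifest is consolidated and the snapshot directory is moved out of .hbase-snapshot/.tmp (previous entry), the snapshot becomes visible to clients. A small sketch of verifying that from the Admin API; the check itself is illustrative and not part of the logged test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListSnapshotsCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // A completed snapshot shows up in listSnapshots() once it has been
          // moved from .hbase-snapshot/.tmp into .hbase-snapshot.
          for (SnapshotDescription sd : admin.listSnapshots()) {
            System.out.println(sd.getName());
          }
        }
      }
    }
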
2024-12-12T20:39:51,147 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T20:39:51,147 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-12T20:39:51,149 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 560 msec 2024-12-12T20:39:51,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-12T20:39:51,219 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 122 completed 2024-12-12T20:39:51,219 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035991219 2024-12-12T20:39:51,219 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:37615, tgtDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035991219, rawTgtDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035991219, srcFsUri=hdfs://localhost:37615, srcDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:39:51,254 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:37615, inputRoot=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:39:51,254 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1165987477_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035991219, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035991219/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:39:51,256 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 
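
The ExportSnapshot entries above (export destination path, inputFs/outputFs, expiration and integrity verification) come from running the org.apache.hadoop.hbase.snapshot.ExportSnapshot tool against the just-taken snapshot. A minimal sketch of an equivalent invocation, assuming the tool is driven through ToolRunner with the standard -snapshot/-copy-to options; the target URI below is a placeholder, not the path from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class RunExport {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // CLI equivalent:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     -snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 \
        //     -copy-to hdfs://<target-namenode>/<export-dir>
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
            // Placeholder export target; substitute a real HDFS URI.
            "-copy-to", "hdfs://namenode.example.com:8020/hbase-exports" });
        System.exit(rc);
      }
    }
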
2024-12-12T20:39:51,263 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035991219/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:39:51,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742124_1300 (size=670) 2024-12-12T20:39:51,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742124_1300 (size=670) 2024-12-12T20:39:51,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742124_1300 (size=670) 2024-12-12T20:39:51,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742125_1301 (size=216) 2024-12-12T20:39:51,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742125_1301 (size=216) 2024-12-12T20:39:51,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742125_1301 (size=216) 2024-12-12T20:39:51,299 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-12T20:39:51,299 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-12T20:39:51,300 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-12T20:39:51,300 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-12T20:39:52,542 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T20:39:52,607 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop-7747276352158661881.jar 2024-12-12T20:39:52,608 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-12T20:39:52,608 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-12T20:39:52,690 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop-12660674096792687438.jar 2024-12-12T20:39:52,690 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-12T20:39:52,691 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-12T20:39:52,691 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-12T20:39:52,691 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-12T20:39:52,691 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-12T20:39:52,692 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-12T20:39:52,692 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-12T20:39:52,692 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-12T20:39:52,692 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-12T20:39:52,693 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-12T20:39:52,693 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-12T20:39:52,693 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-12T20:39:52,693 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-12T20:39:52,694 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-12T20:39:52,694 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-12T20:39:52,694 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-12T20:39:52,694 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-12T20:39:52,695 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-12T20:39:52,695 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:39:52,696 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:39:52,696 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:39:52,696 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:39:52,696 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:39:52,697 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:39:52,697 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:39:52,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742126_1302 (size=451756) 2024-12-12T20:39:52,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742126_1302 (size=451756) 2024-12-12T20:39:52,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742126_1302 (size=451756) 2024-12-12T20:39:52,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742127_1303 (size=127628) 2024-12-12T20:39:52,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742127_1303 (size=127628) 2024-12-12T20:39:52,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742127_1303 (size=127628) 2024-12-12T20:39:52,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742128_1304 (size=2172101) 2024-12-12T20:39:52,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742128_1304 (size=2172101) 2024-12-12T20:39:52,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742128_1304 (size=2172101) 2024-12-12T20:39:53,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742129_1305 (size=213228) 2024-12-12T20:39:53,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742129_1305 (size=213228) 2024-12-12T20:39:53,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742129_1305 (size=213228) 2024-12-12T20:39:53,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742130_1306 (size=1877034) 2024-12-12T20:39:53,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742130_1306 (size=1877034) 2024-12-12T20:39:53,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to 
blk_1073742130_1306 (size=1877034) 2024-12-12T20:39:53,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742131_1307 (size=533455) 2024-12-12T20:39:53,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742131_1307 (size=533455) 2024-12-12T20:39:53,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742131_1307 (size=533455) 2024-12-12T20:39:53,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742132_1308 (size=7280644) 2024-12-12T20:39:53,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742132_1308 (size=7280644) 2024-12-12T20:39:53,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742132_1308 (size=7280644) 2024-12-12T20:39:53,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742133_1309 (size=4188619) 2024-12-12T20:39:53,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742133_1309 (size=4188619) 2024-12-12T20:39:53,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742133_1309 (size=4188619) 2024-12-12T20:39:53,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742134_1310 (size=20406) 2024-12-12T20:39:53,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742134_1310 (size=20406) 2024-12-12T20:39:53,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742134_1310 (size=20406) 2024-12-12T20:39:53,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742135_1311 (size=75495) 2024-12-12T20:39:53,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742135_1311 (size=75495) 2024-12-12T20:39:53,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742135_1311 (size=75495) 2024-12-12T20:39:53,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742136_1312 (size=45609) 2024-12-12T20:39:53,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742136_1312 (size=45609) 2024-12-12T20:39:53,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742136_1312 (size=45609) 2024-12-12T20:39:54,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742137_1313 (size=110084) 2024-12-12T20:39:54,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added 
to blk_1073742137_1313 (size=110084) 2024-12-12T20:39:54,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742137_1313 (size=110084) 2024-12-12T20:39:54,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742138_1314 (size=1323991) 2024-12-12T20:39:54,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742138_1314 (size=1323991) 2024-12-12T20:39:54,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742138_1314 (size=1323991) 2024-12-12T20:39:54,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742139_1315 (size=23076) 2024-12-12T20:39:54,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742139_1315 (size=23076) 2024-12-12T20:39:54,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742139_1315 (size=23076) 2024-12-12T20:39:54,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742140_1316 (size=126803) 2024-12-12T20:39:54,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742140_1316 (size=126803) 2024-12-12T20:39:54,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742140_1316 (size=126803) 2024-12-12T20:39:54,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742141_1317 (size=322274) 2024-12-12T20:39:54,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742141_1317 (size=322274) 2024-12-12T20:39:54,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742141_1317 (size=322274) 2024-12-12T20:39:54,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742142_1318 (size=1832290) 2024-12-12T20:39:54,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742142_1318 (size=1832290) 2024-12-12T20:39:54,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742142_1318 (size=1832290) 2024-12-12T20:39:54,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742143_1319 (size=30081) 2024-12-12T20:39:54,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742143_1319 (size=30081) 2024-12-12T20:39:54,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742143_1319 (size=30081) 2024-12-12T20:39:54,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is 
added to blk_1073742144_1320 (size=53616) 2024-12-12T20:39:54,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742144_1320 (size=53616) 2024-12-12T20:39:54,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742144_1320 (size=53616) 2024-12-12T20:39:54,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742145_1321 (size=29229) 2024-12-12T20:39:54,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742145_1321 (size=29229) 2024-12-12T20:39:54,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742145_1321 (size=29229) 2024-12-12T20:39:54,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742146_1322 (size=169089) 2024-12-12T20:39:54,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742146_1322 (size=169089) 2024-12-12T20:39:54,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742146_1322 (size=169089) 2024-12-12T20:39:55,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742147_1323 (size=5175431) 2024-12-12T20:39:55,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742147_1323 (size=5175431) 2024-12-12T20:39:55,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742147_1323 (size=5175431) 2024-12-12T20:39:55,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742148_1324 (size=136454) 2024-12-12T20:39:55,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742148_1324 (size=136454) 2024-12-12T20:39:55,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742148_1324 (size=136454) 2024-12-12T20:39:55,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742149_1325 (size=907861) 2024-12-12T20:39:55,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742149_1325 (size=907861) 2024-12-12T20:39:55,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742149_1325 (size=907861) 2024-12-12T20:39:55,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742150_1326 (size=3317408) 2024-12-12T20:39:55,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742150_1326 (size=3317408) 2024-12-12T20:39:55,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:32781 is added to blk_1073742150_1326 (size=3317408) 2024-12-12T20:39:55,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742151_1327 (size=6350857) 2024-12-12T20:39:55,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742151_1327 (size=6350857) 2024-12-12T20:39:55,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742151_1327 (size=6350857) 2024-12-12T20:39:56,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742152_1328 (size=503880) 2024-12-12T20:39:56,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742152_1328 (size=503880) 2024-12-12T20:39:56,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742152_1328 (size=503880) 2024-12-12T20:39:56,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742153_1329 (size=4695811) 2024-12-12T20:39:56,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742153_1329 (size=4695811) 2024-12-12T20:39:56,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742153_1329 (size=4695811) 2024-12-12T20:39:56,149 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
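
The long run of "For class ..., using jar ..." entries and the "No job jar file set" warning above come from MapReduce job setup, where TableMapReduceUtil resolves the jars that must ship with the export job. A minimal sketch of that client-side setup (driver class and job name are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class ExportJobSetup {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "snapshot-export-example");
        // Setting an explicit job jar avoids the "No job jar file set" warning.
        job.setJarByClass(ExportJobSetup.class);
        // Resolves and ships the HBase/ZooKeeper/metrics/protobuf jars listed in
        // the "For class ..., using jar ..." messages via the distributed cache.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }
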
2024-12-12T20:39:56,163 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-12-12T20:39:56,176 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=9.7 K 2024-12-12T20:39:56,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742154_1330 (size=378) 2024-12-12T20:39:56,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742154_1330 (size=378) 2024-12-12T20:39:56,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742154_1330 (size=378) 2024-12-12T20:39:56,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742155_1331 (size=15) 2024-12-12T20:39:56,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742155_1331 (size=15) 2024-12-12T20:39:56,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742155_1331 (size=15) 2024-12-12T20:39:56,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742156_1332 (size=304942) 2024-12-12T20:39:56,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742156_1332 (size=304942) 2024-12-12T20:39:56,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742156_1332 (size=304942) 2024-12-12T20:39:56,385 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T20:39:56,385 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-12T20:39:56,464 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0006_000001 (auth:SIMPLE) from 127.0.0.1:47754 2024-12-12T20:40:05,096 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0006_000001 (auth:SIMPLE) from 127.0.0.1:47576 2024-12-12T20:40:05,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742157_1333 (size=350616) 2024-12-12T20:40:05,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742157_1333 (size=350616) 2024-12-12T20:40:05,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742157_1333 (size=350616) 2024-12-12T20:40:07,635 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0006_000001 (auth:SIMPLE) from 127.0.0.1:53586 2024-12-12T20:40:14,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742158_1334 (size=4945) 2024-12-12T20:40:14,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742158_1334 (size=4945) 2024-12-12T20:40:14,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742158_1334 (size=4945) 2024-12-12T20:40:14,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742159_1335 (size=4945) 2024-12-12T20:40:14,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742159_1335 (size=4945) 2024-12-12T20:40:14,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742159_1335 (size=4945) 2024-12-12T20:40:14,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742160_1336 (size=17474) 2024-12-12T20:40:14,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742160_1336 (size=17474) 2024-12-12T20:40:14,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742160_1336 (size=17474) 2024-12-12T20:40:14,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742161_1337 (size=482) 2024-12-12T20:40:14,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742161_1337 (size=482) 2024-12-12T20:40:14,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742161_1337 (size=482) 2024-12-12T20:40:14,831 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_0/usercache/jenkins/appcache/application_1734035798001_0006/container_1734035798001_0006_01_000002/launch_container.sh] 2024-12-12T20:40:14,832 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_0/usercache/jenkins/appcache/application_1734035798001_0006/container_1734035798001_0006_01_000002/container_tokens] 2024-12-12T20:40:14,832 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_0/usercache/jenkins/appcache/application_1734035798001_0006/container_1734035798001_0006_01_000002/sysfs] 2024-12-12T20:40:14,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742162_1338 (size=17474) 2024-12-12T20:40:14,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742162_1338 (size=17474) 2024-12-12T20:40:14,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742162_1338 (size=17474) 2024-12-12T20:40:14,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742163_1339 (size=350616) 2024-12-12T20:40:14,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742163_1339 (size=350616) 2024-12-12T20:40:14,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742163_1339 (size=350616) 2024-12-12T20:40:14,942 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0006_000001 (auth:SIMPLE) from 127.0.0.1:54090 2024-12-12T20:40:16,813 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-12T20:40:16,820 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
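The entries above cover the export MapReduce job running on the MiniMRCluster, including the capacity-scheduler warning about maximum-am-resource-percent (on a real cluster that limit is tuned via yarn.scheduler.capacity.maximum-am-resource-percent), followed by the "Finalize the Snapshot Export" and verification steps. As a rough sketch only, not the test's own invocation path, a snapshot export like this one can be driven through the public ExportSnapshot tool; the destination URI and mapper count below are hypothetical placeholders.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotDriver {
  public static void main(String[] args) throws Exception {
    // Runs the same export job seen in the log: copy the named snapshot to another filesystem.
    int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(),
        new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
            "-copy-to", "hdfs://backup-cluster:8020/hbase-exports",  // hypothetical target
            "-mappers", "2"
        });
    System.exit(rc);
  }
}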
2024-12-12T20:40:16,835 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:40:16,835 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-12T20:40:16,835 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-12T20:40:16,835 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1165987477_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:40:16,836 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-12T20:40:16,836 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-12T20:40:16,836 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1165987477_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035991219/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035991219/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:40:16,837 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035991219/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-12T20:40:16,837 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734035991219/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-12T20:40:16,846 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:40:16,847 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:40:16,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:40:16,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-12T20:40:16,855 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734036016855"}]},"ts":"1734036016855"} 2024-12-12T20:40:16,856 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-12-12T20:40:16,865 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-12-12T20:40:16,866 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-12-12T20:40:16,868 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=04bdedca79d2c7a6c82ea3b440f67379, UNASSIGN}] 2024-12-12T20:40:16,868 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=04bdedca79d2c7a6c82ea3b440f67379, UNASSIGN 2024-12-12T20:40:16,869 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=04bdedca79d2c7a6c82ea3b440f67379, regionState=CLOSING, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:40:16,870 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T20:40:16,870 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; CloseRegionProcedure 04bdedca79d2c7a6c82ea3b440f67379, server=0a737713f6e3,46859,1734035782031}] 2024-12-12T20:40:16,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-12T20:40:17,026 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:40:17,027 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(124): Close 04bdedca79d2c7a6c82ea3b440f67379 2024-12-12T20:40:17,027 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T20:40:17,027 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1681): Closing 04bdedca79d2c7a6c82ea3b440f67379, disabling compactions & flushes 2024-12-12T20:40:17,027 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986057.04bdedca79d2c7a6c82ea3b440f67379. 2024-12-12T20:40:17,027 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986057.04bdedca79d2c7a6c82ea3b440f67379. 
2024-12-12T20:40:17,027 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986057.04bdedca79d2c7a6c82ea3b440f67379. after waiting 0 ms 2024-12-12T20:40:17,027 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986057.04bdedca79d2c7a6c82ea3b440f67379. 2024-12-12T20:40:17,036 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/04bdedca79d2c7a6c82ea3b440f67379/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-12-12T20:40:17,036 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:40:17,036 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986057.04bdedca79d2c7a6c82ea3b440f67379. 2024-12-12T20:40:17,037 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1635): Region close journal for 04bdedca79d2c7a6c82ea3b440f67379: 2024-12-12T20:40:17,039 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(170): Closed 04bdedca79d2c7a6c82ea3b440f67379 2024-12-12T20:40:17,039 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=04bdedca79d2c7a6c82ea3b440f67379, regionState=CLOSED 2024-12-12T20:40:17,042 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-12T20:40:17,043 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; CloseRegionProcedure 04bdedca79d2c7a6c82ea3b440f67379, server=0a737713f6e3,46859,1734035782031 in 170 msec 2024-12-12T20:40:17,046 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=126, resume processing ppid=125 2024-12-12T20:40:17,046 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, ppid=125, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=04bdedca79d2c7a6c82ea3b440f67379, UNASSIGN in 175 msec 2024-12-12T20:40:17,048 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-12T20:40:17,048 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 181 msec 2024-12-12T20:40:17,050 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734036017050"}]},"ts":"1734036017050"} 2024-12-12T20:40:17,051 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in 
hbase:meta 2024-12-12T20:40:17,057 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-12-12T20:40:17,060 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 211 msec 2024-12-12T20:40:17,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-12T20:40:17,157 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 124 completed 2024-12-12T20:40:17,157 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:40:17,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:40:17,159 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=128, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:40:17,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:40:17,159 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=128, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:40:17,161 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46859 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:40:17,162 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/04bdedca79d2c7a6c82ea3b440f67379 2024-12-12T20:40:17,162 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddedacd6c3cc0f161f5931ef87a8a8b0 2024-12-12T20:40:17,162 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/58607d6420347fd64375fce8461106b2 2024-12-12T20:40:17,167 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/58607d6420347fd64375fce8461106b2/cf, FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/58607d6420347fd64375fce8461106b2/recovered.edits] 2024-12-12T20:40:17,167 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(161): Archiving [FileablePath, 
hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/04bdedca79d2c7a6c82ea3b440f67379/cf, FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/04bdedca79d2c7a6c82ea3b440f67379/recovered.edits] 2024-12-12T20:40:17,167 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddedacd6c3cc0f161f5931ef87a8a8b0/cf, FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddedacd6c3cc0f161f5931ef87a8a8b0/recovered.edits] 2024-12-12T20:40:17,172 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddedacd6c3cc0f161f5931ef87a8a8b0/cf/e8876c45f822475ea82e7af8e9a51838 to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddedacd6c3cc0f161f5931ef87a8a8b0/cf/e8876c45f822475ea82e7af8e9a51838 2024-12-12T20:40:17,174 DEBUG [HFileArchiver-33 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/04bdedca79d2c7a6c82ea3b440f67379/cf/e8876c45f822475ea82e7af8e9a51838.ddedacd6c3cc0f161f5931ef87a8a8b0 to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/04bdedca79d2c7a6c82ea3b440f67379/cf/e8876c45f822475ea82e7af8e9a51838.ddedacd6c3cc0f161f5931ef87a8a8b0 2024-12-12T20:40:17,174 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/58607d6420347fd64375fce8461106b2/cf/e2622c880a6946949e860e3032a5c932 to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/58607d6420347fd64375fce8461106b2/cf/e2622c880a6946949e860e3032a5c932 2024-12-12T20:40:17,174 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/04bdedca79d2c7a6c82ea3b440f67379/cf/e2622c880a6946949e860e3032a5c932.58607d6420347fd64375fce8461106b2 to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/04bdedca79d2c7a6c82ea3b440f67379/cf/e2622c880a6946949e860e3032a5c932.58607d6420347fd64375fce8461106b2 2024-12-12T20:40:17,176 DEBUG [HFileArchiver-34 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddedacd6c3cc0f161f5931ef87a8a8b0/recovered.edits/8.seqid to 
hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddedacd6c3cc0f161f5931ef87a8a8b0/recovered.edits/8.seqid 2024-12-12T20:40:17,176 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddedacd6c3cc0f161f5931ef87a8a8b0 2024-12-12T20:40:17,177 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/58607d6420347fd64375fce8461106b2/recovered.edits/8.seqid to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/58607d6420347fd64375fce8461106b2/recovered.edits/8.seqid 2024-12-12T20:40:17,177 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/04bdedca79d2c7a6c82ea3b440f67379/recovered.edits/12.seqid to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/04bdedca79d2c7a6c82ea3b440f67379/recovered.edits/12.seqid 2024-12-12T20:40:17,177 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/58607d6420347fd64375fce8461106b2 2024-12-12T20:40:17,177 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/04bdedca79d2c7a6c82ea3b440f67379 2024-12-12T20:40:17,177 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-12-12T20:40:17,179 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=128, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:40:17,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:40:17,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:40:17,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:40:17,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase 
Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:40:17,183 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-12T20:40:17,183 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-12T20:40:17,183 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-12T20:40:17,183 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-12T20:40:17,189 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-12-12T20:40:17,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:40:17,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:17,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:40:17,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:17,191 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 
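Once the table is disabled, the log shows the DeleteTableProcedure archiving each region's cf/ and recovered.edits/ directories under archive/ via HFileArchiver before removing the META rows and the ACL znode. A client-side sketch of the request that triggers this, again assuming an existing connection rather than the test's own helper code:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableExample {
  public static void main(String[] args) throws Exception {
    TableName table =
        TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.deleteTable(table);   // the table must already be disabled
      System.out.println("still exists? " + admin.tableExists(table));  // expected: false
    }
  }
}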
2024-12-12T20:40:17,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:40:17,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:40:17,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:17,192 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=128, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:40:17,192 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-12-12T20:40:17,192 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986057.04bdedca79d2c7a6c82ea3b440f67379.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734036017192"}]},"ts":"9223372036854775807"} 2024-12-12T20:40:17,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-12T20:40:17,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:17,193 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:17,194 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:17,194 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:17,194 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:17,194 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-12T20:40:17,194 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 
04bdedca79d2c7a6c82ea3b440f67379, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986057.04bdedca79d2c7a6c82ea3b440f67379.', STARTKEY => '', ENDKEY => ''}] 2024-12-12T20:40:17,194 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 2024-12-12T20:40:17,195 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734036017194"}]},"ts":"9223372036854775807"} 2024-12-12T20:40:17,197 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-12-12T20:40:17,208 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=128, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:40:17,209 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 51 msec 2024-12-12T20:40:17,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-12T20:40:17,293 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 128 completed 2024-12-12T20:40:17,294 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T20:40:17,294 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T20:40:17,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=129, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T20:40:17,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-12T20:40:17,297 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734036017297"}]},"ts":"1734036017297"} 2024-12-12T20:40:17,299 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-12-12T20:40:17,329 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-12-12T20:40:17,331 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=130, ppid=129, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-12-12T20:40:17,342 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=8aad17ad3243c93d19b0a0f31dc6ae44, UNASSIGN}, {pid=132, ppid=130, 
state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=bf7783e575074cc3a4ec1d197e979973, UNASSIGN}] 2024-12-12T20:40:17,343 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=131, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=8aad17ad3243c93d19b0a0f31dc6ae44, UNASSIGN 2024-12-12T20:40:17,343 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=132, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=bf7783e575074cc3a4ec1d197e979973, UNASSIGN 2024-12-12T20:40:17,344 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=131 updating hbase:meta row=8aad17ad3243c93d19b0a0f31dc6ae44, regionState=CLOSING, regionLocation=0a737713f6e3,36817,1734035782356 2024-12-12T20:40:17,347 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=bf7783e575074cc3a4ec1d197e979973, regionState=CLOSING, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:40:17,356 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T20:40:17,356 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=131, state=RUNNABLE; CloseRegionProcedure 8aad17ad3243c93d19b0a0f31dc6ae44, server=0a737713f6e3,36817,1734035782356}] 2024-12-12T20:40:17,359 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T20:40:17,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=134, ppid=132, state=RUNNABLE; CloseRegionProcedure bf7783e575074cc3a4ec1d197e979973, server=0a737713f6e3,46859,1734035782031}] 2024-12-12T20:40:17,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-12T20:40:17,513 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:40:17,514 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(124): Close 8aad17ad3243c93d19b0a0f31dc6ae44 2024-12-12T20:40:17,514 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T20:40:17,514 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1681): Closing 8aad17ad3243c93d19b0a0f31dc6ae44, disabling compactions & flushes 2024-12-12T20:40:17,514 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44. 2024-12-12T20:40:17,514 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44. 
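Unlike the single-region merged table above, testtb-testExportFileSystemStateWithMergeRegion still has two regions (split at row key '1'), so its disable fans out into two parallel TransitRegionStateProcedure/CloseRegionProcedure pairs. A small sketch, not from the test, of how a client could list those regions and their key ranges before the disable:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class ListRegionsExample {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Prints each region's encoded name and [startKey, endKey) range from hbase:meta.
      for (RegionInfo region : admin.getRegions(table)) {
        System.out.printf("%s [%s, %s)%n",
            region.getEncodedName(),
            Bytes.toStringBinary(region.getStartKey()),
            Bytes.toStringBinary(region.getEndKey()));
      }
    }
  }
}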
2024-12-12T20:40:17,514 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:40:17,514 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44. after waiting 0 ms 2024-12-12T20:40:17,514 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44. 2024-12-12T20:40:17,515 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(124): Close bf7783e575074cc3a4ec1d197e979973 2024-12-12T20:40:17,515 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T20:40:17,515 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1681): Closing bf7783e575074cc3a4ec1d197e979973, disabling compactions & flushes 2024-12-12T20:40:17,515 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973. 2024-12-12T20:40:17,515 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973. 2024-12-12T20:40:17,515 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973. after waiting 0 ms 2024-12-12T20:40:17,515 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973. 2024-12-12T20:40:17,542 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/bf7783e575074cc3a4ec1d197e979973/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T20:40:17,543 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:40:17,543 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973. 
2024-12-12T20:40:17,543 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1635): Region close journal for bf7783e575074cc3a4ec1d197e979973: 2024-12-12T20:40:17,545 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(170): Closed bf7783e575074cc3a4ec1d197e979973 2024-12-12T20:40:17,545 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=bf7783e575074cc3a4ec1d197e979973, regionState=CLOSED 2024-12-12T20:40:17,548 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/8aad17ad3243c93d19b0a0f31dc6ae44/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T20:40:17,549 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=134, resume processing ppid=132 2024-12-12T20:40:17,549 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:40:17,549 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, ppid=132, state=SUCCESS; CloseRegionProcedure bf7783e575074cc3a4ec1d197e979973, server=0a737713f6e3,46859,1734035782031 in 188 msec 2024-12-12T20:40:17,549 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44. 
2024-12-12T20:40:17,549 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1635): Region close journal for 8aad17ad3243c93d19b0a0f31dc6ae44: 2024-12-12T20:40:17,550 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, ppid=130, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=bf7783e575074cc3a4ec1d197e979973, UNASSIGN in 208 msec 2024-12-12T20:40:17,551 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(170): Closed 8aad17ad3243c93d19b0a0f31dc6ae44 2024-12-12T20:40:17,551 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=131 updating hbase:meta row=8aad17ad3243c93d19b0a0f31dc6ae44, regionState=CLOSED 2024-12-12T20:40:17,555 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=131 2024-12-12T20:40:17,556 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=131, state=SUCCESS; CloseRegionProcedure 8aad17ad3243c93d19b0a0f31dc6ae44, server=0a737713f6e3,36817,1734035782356 in 197 msec 2024-12-12T20:40:17,558 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-12-12T20:40:17,558 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=8aad17ad3243c93d19b0a0f31dc6ae44, UNASSIGN in 214 msec 2024-12-12T20:40:17,561 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=130, resume processing ppid=129 2024-12-12T20:40:17,561 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, ppid=129, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 228 msec 2024-12-12T20:40:17,563 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734036017563"}]},"ts":"1734036017563"} 2024-12-12T20:40:17,565 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-12-12T20:40:17,574 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-12-12T20:40:17,577 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 281 msec 2024-12-12T20:40:17,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-12T20:40:17,599 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 129 completed 2024-12-12T20:40:17,600 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T20:40:17,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=135, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T20:40:17,602 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=135, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T20:40:17,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T20:40:17,602 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=135, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T20:40:17,603 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46859 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T20:40:17,604 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/bf7783e575074cc3a4ec1d197e979973 2024-12-12T20:40:17,604 DEBUG [HFileArchiver-33 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/8aad17ad3243c93d19b0a0f31dc6ae44 2024-12-12T20:40:17,606 DEBUG [HFileArchiver-33 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/8aad17ad3243c93d19b0a0f31dc6ae44/cf, FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/8aad17ad3243c93d19b0a0f31dc6ae44/recovered.edits] 2024-12-12T20:40:17,606 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/bf7783e575074cc3a4ec1d197e979973/cf, FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/bf7783e575074cc3a4ec1d197e979973/recovered.edits] 2024-12-12T20:40:17,610 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/bf7783e575074cc3a4ec1d197e979973/cf/fcf8eb579e094850805e937ebb7277c3 to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/bf7783e575074cc3a4ec1d197e979973/cf/fcf8eb579e094850805e937ebb7277c3 2024-12-12T20:40:17,610 DEBUG [HFileArchiver-34 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/8aad17ad3243c93d19b0a0f31dc6ae44/cf/a85c2f29d18145f6961cebd66d2d7918 to 
hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/8aad17ad3243c93d19b0a0f31dc6ae44/cf/a85c2f29d18145f6961cebd66d2d7918 2024-12-12T20:40:17,613 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/bf7783e575074cc3a4ec1d197e979973/recovered.edits/9.seqid to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/bf7783e575074cc3a4ec1d197e979973/recovered.edits/9.seqid 2024-12-12T20:40:17,613 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/8aad17ad3243c93d19b0a0f31dc6ae44/recovered.edits/9.seqid to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/8aad17ad3243c93d19b0a0f31dc6ae44/recovered.edits/9.seqid 2024-12-12T20:40:17,614 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/bf7783e575074cc3a4ec1d197e979973 2024-12-12T20:40:17,614 DEBUG [HFileArchiver-33 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithMergeRegion/8aad17ad3243c93d19b0a0f31dc6ae44 2024-12-12T20:40:17,614 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-12-12T20:40:17,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T20:40:17,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T20:40:17,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T20:40:17,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T20:40:17,616 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=135, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T20:40:17,617 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-12T20:40:17,617 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-12T20:40:17,617 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-12T20:40:17,617 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-12T20:40:17,621 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-12-12T20:40:17,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T20:40:17,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T20:40:17,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:17,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:17,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T20:40:17,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:17,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T20:40:17,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:17,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-12T20:40:17,625 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 
2024-12-12T20:40:17,627 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=135, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T20:40:17,627 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 2024-12-12T20:40:17,628 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734036017627"}]},"ts":"9223372036854775807"} 2024-12-12T20:40:17,628 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734036017627"}]},"ts":"9223372036854775807"} 2024-12-12T20:40:17,629 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-12T20:40:17,630 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 8aad17ad3243c93d19b0a0f31dc6ae44, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1734035981508.8aad17ad3243c93d19b0a0f31dc6ae44.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => bf7783e575074cc3a4ec1d197e979973, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1734035981508.bf7783e575074cc3a4ec1d197e979973.', STARTKEY => '1', ENDKEY => ''}] 2024-12-12T20:40:17,630 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 
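
For reference, the records around here trace the master-side DeleteTableProcedure (pid=135) through its states (PRE_OPERATION, CLEAR_FS_LAYOUT, REMOVE_FROM_META, UNASSIGN_REGIONS). A minimal client-side sketch of the kind of Admin call that drives such a procedure is shown below; the table name is taken from the log, while the class name and control flow are illustrative only, not part of the test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Hedged sketch: disable and delete a table through the Admin API.
    // The master turns the deleteTable() call into a DeleteTableProcedure
    // like pid=135 in the surrounding log records.
    public class DeleteTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
          if (admin.tableExists(tn)) {
            if (admin.isTableEnabled(tn)) {
              admin.disableTable(tn); // regions are unassigned before deletion
            }
            admin.deleteTable(tn);    // archives region dirs and removes META rows, as logged above
          }
        }
      }
    }
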
2024-12-12T20:40:17,630 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734036017630"}]},"ts":"9223372036854775807"} 2024-12-12T20:40:17,632 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-12-12T20:40:17,641 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=135, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T20:40:17,643 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 41 msec 2024-12-12T20:40:17,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-12T20:40:17,726 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 135 completed 2024-12-12T20:40:17,738 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" 2024-12-12T20:40:17,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T20:40:17,741 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" 2024-12-12T20:40:17,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T20:40:17,749 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" 2024-12-12T20:40:17,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:40:17,780 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=800 (was 789) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-32 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874193583) connection to localhost/127.0.0.1:39271 from jenkins 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1229621394_1 at /127.0.0.1:37722 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1165987477_22 at /127.0.0.1:37752 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-27 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1165987477_22 at /127.0.0.1:41228 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-29 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-31 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-30 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-33 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-31 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-4629 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-33 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-30 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-32 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1165987477_22 at /127.0.0.1:37820 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-29 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-28 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-34 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 67948) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1229621394_1 at /127.0.0.1:37812 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39271 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-34 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=807 (was 794) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1473 (was 1454) - SystemLoadAverage LEAK? 
-, ProcessCount=20 (was 20), AvailableMemoryMB=2350 (was 3345) 2024-12-12T20:40:17,780 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=800 is superior to 500 2024-12-12T20:40:17,803 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=800, OpenFileDescriptor=807, MaxFileDescriptor=1048576, SystemLoadAverage=1473, ProcessCount=20, AvailableMemoryMB=2345 2024-12-12T20:40:17,803 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=800 is superior to 500 2024-12-12T20:40:17,805 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T20:40:17,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-12T20:40:17,808 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T20:40:17,809 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:40:17,809 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 136 2024-12-12T20:40:17,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-12T20:40:17,810 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T20:40:17,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742164_1340 (size=407) 2024-12-12T20:40:17,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742164_1340 (size=407) 2024-12-12T20:40:17,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742164_1340 (size=407) 2024-12-12T20:40:17,868 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 5205433f2a6c9b509c227daf8acaef17, NAME => 'testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:40:17,876 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 332c8ddf59d491e4debdd5c9e0310ba0, NAME => 'testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:40:17,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-12T20:40:17,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742165_1341 (size=68) 2024-12-12T20:40:17,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742165_1341 (size=68) 2024-12-12T20:40:17,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742165_1341 (size=68) 2024-12-12T20:40:17,915 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:40:17,915 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1681): Closing 332c8ddf59d491e4debdd5c9e0310ba0, disabling compactions & flushes 2024-12-12T20:40:17,915 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0. 2024-12-12T20:40:17,915 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0. 2024-12-12T20:40:17,915 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0. after waiting 0 ms 2024-12-12T20:40:17,916 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0. 
2024-12-12T20:40:17,916 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0. 2024-12-12T20:40:17,916 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1635): Region close journal for 332c8ddf59d491e4debdd5c9e0310ba0: 2024-12-12T20:40:17,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742166_1342 (size=68) 2024-12-12T20:40:17,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742166_1342 (size=68) 2024-12-12T20:40:17,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742166_1342 (size=68) 2024-12-12T20:40:17,943 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:40:17,944 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1681): Closing 5205433f2a6c9b509c227daf8acaef17, disabling compactions & flushes 2024-12-12T20:40:17,944 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17. 2024-12-12T20:40:17,944 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17. 2024-12-12T20:40:17,944 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17. after waiting 0 ms 2024-12-12T20:40:17,944 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17. 2024-12-12T20:40:17,944 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17. 
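
The CreateTableProcedure records for pid=136 show a table with one 'cf' family and two regions split at row key '1'. A minimal sketch of the corresponding client-side create call is given below; the table name, family name, split key, and VERSIONS setting come from the logged descriptor, while the class name and other details are illustrative assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hedged sketch: create a pre-split table matching the descriptor in the log.
    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportExpiredSnapshot"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
                  .build())
              .build();
          // One split key at '1' yields the two regions seen in the log:
          // ('', '1') and ('1', '').
          admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }
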
2024-12-12T20:40:17,944 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1635): Region close journal for 5205433f2a6c9b509c227daf8acaef17: 2024-12-12T20:40:17,945 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T20:40:17,946 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1734036017945"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734036017945"}]},"ts":"1734036017945"} 2024-12-12T20:40:17,946 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1734036017945"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734036017945"}]},"ts":"1734036017945"} 2024-12-12T20:40:17,949 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-12T20:40:17,953 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T20:40:17,953 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734036017953"}]},"ts":"1734036017953"} 2024-12-12T20:40:17,960 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-12T20:40:18,041 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {0a737713f6e3=0} racks are {/default-rack=0} 2024-12-12T20:40:18,043 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-12T20:40:18,043 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-12T20:40:18,043 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-12T20:40:18,043 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-12T20:40:18,043 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-12T20:40:18,043 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-12T20:40:18,043 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T20:40:18,043 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=5205433f2a6c9b509c227daf8acaef17, ASSIGN}, {pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=332c8ddf59d491e4debdd5c9e0310ba0, ASSIGN}] 2024-12-12T20:40:18,048 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure 
table=testtb-testExportExpiredSnapshot, region=332c8ddf59d491e4debdd5c9e0310ba0, ASSIGN 2024-12-12T20:40:18,048 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=5205433f2a6c9b509c227daf8acaef17, ASSIGN 2024-12-12T20:40:18,052 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=332c8ddf59d491e4debdd5c9e0310ba0, ASSIGN; state=OFFLINE, location=0a737713f6e3,42407,1734035781742; forceNewPlan=false, retain=false 2024-12-12T20:40:18,052 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=5205433f2a6c9b509c227daf8acaef17, ASSIGN; state=OFFLINE, location=0a737713f6e3,46859,1734035782031; forceNewPlan=false, retain=false 2024-12-12T20:40:18,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-12T20:40:18,202 INFO [0a737713f6e3:43067 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-12T20:40:18,203 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=137 updating hbase:meta row=5205433f2a6c9b509c227daf8acaef17, regionState=OPENING, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:40:18,203 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=332c8ddf59d491e4debdd5c9e0310ba0, regionState=OPENING, regionLocation=0a737713f6e3,42407,1734035781742 2024-12-12T20:40:18,206 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=137, state=RUNNABLE; OpenRegionProcedure 5205433f2a6c9b509c227daf8acaef17, server=0a737713f6e3,46859,1734035782031}] 2024-12-12T20:40:18,207 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=140, ppid=138, state=RUNNABLE; OpenRegionProcedure 332c8ddf59d491e4debdd5c9e0310ba0, server=0a737713f6e3,42407,1734035781742}] 2024-12-12T20:40:18,359 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:40:18,362 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:40:18,367 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] handler.AssignRegionHandler(135): Open testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17. 2024-12-12T20:40:18,367 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7285): Opening region: {ENCODED => 5205433f2a6c9b509c227daf8acaef17, NAME => 'testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17.', STARTKEY => '', ENDKEY => '1'} 2024-12-12T20:40:18,368 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17. 
service=AccessControlService 2024-12-12T20:40:18,368 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T20:40:18,368 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 5205433f2a6c9b509c227daf8acaef17 2024-12-12T20:40:18,368 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:40:18,369 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7327): checking encryption for 5205433f2a6c9b509c227daf8acaef17 2024-12-12T20:40:18,369 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7330): checking classloading for 5205433f2a6c9b509c227daf8acaef17 2024-12-12T20:40:18,383 INFO [StoreOpener-5205433f2a6c9b509c227daf8acaef17-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5205433f2a6c9b509c227daf8acaef17 2024-12-12T20:40:18,392 INFO [StoreOpener-5205433f2a6c9b509c227daf8acaef17-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5205433f2a6c9b509c227daf8acaef17 columnFamilyName cf 2024-12-12T20:40:18,392 DEBUG [StoreOpener-5205433f2a6c9b509c227daf8acaef17-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:40:18,392 INFO [StoreOpener-5205433f2a6c9b509c227daf8acaef17-1 {}] regionserver.HStore(327): Store=5205433f2a6c9b509c227daf8acaef17/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:40:18,394 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/5205433f2a6c9b509c227daf8acaef17 2024-12-12T20:40:18,394 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/5205433f2a6c9b509c227daf8acaef17 2024-12-12T20:40:18,395 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] handler.AssignRegionHandler(135): Open testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0. 2024-12-12T20:40:18,395 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7285): Opening region: {ENCODED => 332c8ddf59d491e4debdd5c9e0310ba0, NAME => 'testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0.', STARTKEY => '1', ENDKEY => ''} 2024-12-12T20:40:18,395 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0. service=AccessControlService 2024-12-12T20:40:18,396 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T20:40:18,396 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 332c8ddf59d491e4debdd5c9e0310ba0 2024-12-12T20:40:18,396 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:40:18,396 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7327): checking encryption for 332c8ddf59d491e4debdd5c9e0310ba0 2024-12-12T20:40:18,396 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7330): checking classloading for 332c8ddf59d491e4debdd5c9e0310ba0 2024-12-12T20:40:18,399 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1085): writing seq id for 5205433f2a6c9b509c227daf8acaef17 2024-12-12T20:40:18,401 INFO [StoreOpener-332c8ddf59d491e4debdd5c9e0310ba0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 332c8ddf59d491e4debdd5c9e0310ba0 2024-12-12T20:40:18,403 INFO [StoreOpener-332c8ddf59d491e4debdd5c9e0310ba0-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 332c8ddf59d491e4debdd5c9e0310ba0 columnFamilyName cf 2024-12-12T20:40:18,403 DEBUG [StoreOpener-332c8ddf59d491e4debdd5c9e0310ba0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:40:18,404 INFO [StoreOpener-332c8ddf59d491e4debdd5c9e0310ba0-1 {}] regionserver.HStore(327): Store=332c8ddf59d491e4debdd5c9e0310ba0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:40:18,405 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/332c8ddf59d491e4debdd5c9e0310ba0 2024-12-12T20:40:18,405 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/332c8ddf59d491e4debdd5c9e0310ba0 2024-12-12T20:40:18,405 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/5205433f2a6c9b509c227daf8acaef17/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:40:18,406 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1102): Opened 5205433f2a6c9b509c227daf8acaef17; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71647750, jitterRate=0.06763467192649841}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:40:18,407 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1001): Region open journal for 5205433f2a6c9b509c227daf8acaef17: 2024-12-12T20:40:18,411 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17., pid=139, masterSystemTime=1734036018358 2024-12-12T20:40:18,412 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1085): writing seq id for 332c8ddf59d491e4debdd5c9e0310ba0 2024-12-12T20:40:18,413 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17. 2024-12-12T20:40:18,413 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] handler.AssignRegionHandler(164): Opened testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17. 
2024-12-12T20:40:18,417 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=137 updating hbase:meta row=5205433f2a6c9b509c227daf8acaef17, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:40:18,421 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/332c8ddf59d491e4debdd5c9e0310ba0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:40:18,422 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1102): Opened 332c8ddf59d491e4debdd5c9e0310ba0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63770728, jitterRate=-0.04974210262298584}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:40:18,422 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1001): Region open journal for 332c8ddf59d491e4debdd5c9e0310ba0: 2024-12-12T20:40:18,424 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0., pid=140, masterSystemTime=1734036018362 2024-12-12T20:40:18,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-12T20:40:18,427 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0. 2024-12-12T20:40:18,427 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] handler.AssignRegionHandler(164): Opened testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0. 
2024-12-12T20:40:18,427 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=137 2024-12-12T20:40:18,427 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=137, state=SUCCESS; OpenRegionProcedure 5205433f2a6c9b509c227daf8acaef17, server=0a737713f6e3,46859,1734035782031 in 217 msec 2024-12-12T20:40:18,427 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=332c8ddf59d491e4debdd5c9e0310ba0, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,42407,1734035781742 2024-12-12T20:40:18,431 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=5205433f2a6c9b509c227daf8acaef17, ASSIGN in 384 msec 2024-12-12T20:40:18,436 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=140, resume processing ppid=138 2024-12-12T20:40:18,437 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, ppid=138, state=SUCCESS; OpenRegionProcedure 332c8ddf59d491e4debdd5c9e0310ba0, server=0a737713f6e3,42407,1734035781742 in 222 msec 2024-12-12T20:40:18,438 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=138, resume processing ppid=136 2024-12-12T20:40:18,438 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, ppid=136, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=332c8ddf59d491e4debdd5c9e0310ba0, ASSIGN in 393 msec 2024-12-12T20:40:18,440 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T20:40:18,440 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734036018440"}]},"ts":"1734036018440"} 2024-12-12T20:40:18,443 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-12T20:40:18,486 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T20:40:18,486 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-12-12T20:40:18,489 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46859 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-12T20:40:18,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:18,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:18,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:18,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:18,510 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:18,510 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:18,511 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:18,511 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 703 msec 2024-12-12T20:40:18,515 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:18,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-12T20:40:18,928 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 136 completed 2024-12-12T20:40:18,928 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-12-12T20:40:18,928 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:40:18,947 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-12-12T20:40:18,948 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:40:18,948 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportExpiredSnapshot assigned. 2024-12-12T20:40:18,951 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-12T20:40:18,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734036018951 (current time:1734036018951). 
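[Editor's note] The snapshot request logged just above ({ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }) is what a client-side Admin.snapshot() call produces. A minimal sketch of such a call, assuming a reachable cluster whose ZooKeeper quorum is picked up from hbase-site.xml (the class name and connection setup are illustrative only, not the test's own code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml (ZK quorum, etc.)
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // FLUSH-type snapshot, matching "type=FLUSH ttl=0" in the request logged above.
          admin.snapshot("emptySnaptb0-testExportExpiredSnapshot",
              TableName.valueOf("testtb-testExportExpiredSnapshot"),
              SnapshotType.FLUSH);
        }
      }
    }

When the client leaves them unset, the master fills in the creation time and snapshot VERSION itself, which is exactly what the SnapshotDescriptionUtils(331)/(354) entries above record.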
2024-12-12T20:40:18,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T20:40:18,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-12T20:40:18,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-12T20:40:18,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4555689b to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7062499 2024-12-12T20:40:18,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a60b9a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:40:18,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:40:18,988 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37752, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:40:18,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4555689b to 127.0.0.1:52655 2024-12-12T20:40:18,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:40:18,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5ba4f3d2 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6d3036d3 2024-12-12T20:40:19,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32c27f24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:40:19,051 DEBUG [hconnection-0x5dd4c4fa-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:40:19,052 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37756, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:40:19,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5ba4f3d2 to 127.0.0.1:52655 2024-12-12T20:40:19,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:40:19,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 
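[Editor's note] The PermissionStorage(611) entry above reports the ACL row for the table as [jenkins: RWXCA]. For reference, the same table permissions can be read from a client through AccessControlClient; a hedged sketch under that assumption (class name and table regex are illustrative, and since the accessor surface of UserPermission differs across 2.x releases the example relies only on toString()):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class ShowTableAcls {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          try {
            // AccessControlClient's static helpers are declared to throw Throwable.
            for (UserPermission p :
                AccessControlClient.getUserPermissions(conn, "testtb-testExportExpiredSnapshot")) {
              System.out.println(p); // expected to include the jenkins RWXCA grant seen above
            }
          } catch (Throwable t) {
            throw new RuntimeException(t);
          }
        }
      }
    }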
2024-12-12T20:40:19,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-12T20:40:19,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=141, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-12T20:40:19,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 141 2024-12-12T20:40:19,064 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T20:40:19,066 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T20:40:19,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-12T20:40:19,082 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T20:40:19,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742167_1343 (size=170) 2024-12-12T20:40:19,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742167_1343 (size=170) 2024-12-12T20:40:19,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742167_1343 (size=170) 2024-12-12T20:40:19,150 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T20:40:19,150 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure 5205433f2a6c9b509c227daf8acaef17}, {pid=143, ppid=141, state=RUNNABLE; SnapshotRegionProcedure 332c8ddf59d491e4debdd5c9e0310ba0}] 2024-12-12T20:40:19,153 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=143, ppid=141, state=RUNNABLE; SnapshotRegionProcedure 332c8ddf59d491e4debdd5c9e0310ba0 2024-12-12T20:40:19,154 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, 
state=RUNNABLE; SnapshotRegionProcedure 5205433f2a6c9b509c227daf8acaef17 2024-12-12T20:40:19,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-12T20:40:19,300 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T20:40:19,306 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:40:19,306 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:40:19,307 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42407 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=143 2024-12-12T20:40:19,307 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46859 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-12-12T20:40:19,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17. 2024-12-12T20:40:19,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0. 2024-12-12T20:40:19,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2538): Flush status journal for 5205433f2a6c9b509c227daf8acaef17: 2024-12-12T20:40:19,308 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-12T20:40:19,308 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for 332c8ddf59d491e4debdd5c9e0310ba0: 2024-12-12T20:40:19,308 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-12T20:40:19,308 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-12T20:40:19,308 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:40:19,308 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T20:40:19,308 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-12T20:40:19,308 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:40:19,308 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T20:40:19,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-12T20:40:19,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742168_1344 (size=71) 2024-12-12T20:40:19,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742168_1344 (size=71) 2024-12-12T20:40:19,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742168_1344 (size=71) 2024-12-12T20:40:19,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17. 
2024-12-12T20:40:19,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-12-12T20:40:19,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=142 2024-12-12T20:40:19,380 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 5205433f2a6c9b509c227daf8acaef17 2024-12-12T20:40:19,380 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure 5205433f2a6c9b509c227daf8acaef17 2024-12-12T20:40:19,383 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, ppid=141, state=SUCCESS; SnapshotRegionProcedure 5205433f2a6c9b509c227daf8acaef17 in 232 msec 2024-12-12T20:40:19,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742169_1345 (size=71) 2024-12-12T20:40:19,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742169_1345 (size=71) 2024-12-12T20:40:19,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742169_1345 (size=71) 2024-12-12T20:40:19,386 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0. 
2024-12-12T20:40:19,387 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-12-12T20:40:19,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-12-12T20:40:19,387 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 332c8ddf59d491e4debdd5c9e0310ba0 2024-12-12T20:40:19,387 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=143, ppid=141, state=RUNNABLE; SnapshotRegionProcedure 332c8ddf59d491e4debdd5c9e0310ba0 2024-12-12T20:40:19,390 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=141 2024-12-12T20:40:19,390 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T20:40:19,390 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=141, state=SUCCESS; SnapshotRegionProcedure 332c8ddf59d491e4debdd5c9e0310ba0 in 238 msec 2024-12-12T20:40:19,391 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T20:40:19,392 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T20:40:19,393 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-12-12T20:40:19,393 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-12-12T20:40:19,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742170_1346 (size=552) 2024-12-12T20:40:19,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742170_1346 (size=552) 2024-12-12T20:40:19,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742170_1346 (size=552) 2024-12-12T20:40:19,454 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T20:40:19,473 INFO 
[PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T20:40:19,473 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-12-12T20:40:19,476 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T20:40:19,476 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 141 2024-12-12T20:40:19,479 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 415 msec 2024-12-12T20:40:19,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-12T20:40:19,672 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot, procId: 141 completed 2024-12-12T20:40:19,684 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46859 {}] regionserver.HRegion(8254): writing data to region testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T20:40:19,689 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42407 {}] regionserver.HRegion(8254): writing data to region testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T20:40:19,692 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-12T20:40:19,692 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17. 2024-12-12T20:40:19,693 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:40:19,712 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-12T20:40:19,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734036019712 (current time:1734036019712). 
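[Editor's note] The HRegion(8254) warnings above ("writing data to region ... with WAL disabled. Data may be lost in the event of a crash.") come from client writes whose durability skips the write-ahead log. A minimal sketch of such a write, assuming the same table and the cf:q column that appears later in the flush entries (the row key and value below are made up for illustration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPut {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
          Put put = new Put(Bytes.toBytes("row-0")) // hypothetical row key
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          put.setDurability(Durability.SKIP_WAL);   // triggers the HRegion(8254) warning logged above
          table.put(put);                           // data is lost if the region server crashes before a flush
        }
      }
    }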
2024-12-12T20:40:19,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T20:40:19,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-12T20:40:19,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-12T20:40:19,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6ce2c76a to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@176a2d18 2024-12-12T20:40:19,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22fa840a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:40:19,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:40:19,739 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37768, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:40:19,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6ce2c76a to 127.0.0.1:52655 2024-12-12T20:40:19,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:40:19,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x50fc2c17 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@16b8c9fd 2024-12-12T20:40:19,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6094c5f4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:40:19,804 DEBUG [hconnection-0x3127d428-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:40:19,806 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37772, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:40:19,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x50fc2c17 to 127.0.0.1:52655 2024-12-12T20:40:19,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:40:19,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 
2024-12-12T20:40:19,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-12T20:40:19,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-12T20:40:19,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 144 2024-12-12T20:40:19,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-12T20:40:19,818 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T20:40:19,823 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T20:40:19,847 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T20:40:19,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-12T20:40:19,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742171_1347 (size=165) 2024-12-12T20:40:19,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742171_1347 (size=165) 2024-12-12T20:40:19,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742171_1347 (size=165) 2024-12-12T20:40:19,951 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T20:40:19,951 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 5205433f2a6c9b509c227daf8acaef17}, {pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 332c8ddf59d491e4debdd5c9e0310ba0}] 2024-12-12T20:40:19,959 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 
5205433f2a6c9b509c227daf8acaef17 2024-12-12T20:40:19,960 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 332c8ddf59d491e4debdd5c9e0310ba0 2024-12-12T20:40:20,002 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-12T20:40:20,117 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:40:20,118 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46859 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=145 2024-12-12T20:40:20,118 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17. 2024-12-12T20:40:20,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-12T20:40:20,118 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:40:20,118 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing 5205433f2a6c9b509c227daf8acaef17 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-12T20:40:20,119 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42407 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=146 2024-12-12T20:40:20,119 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0. 
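[Editor's note] Because this second snapshot is of type FLUSH and the regions now hold unflushed data, the procedure flushes each region's memstore before recording references, which is what the "Flushing ... 1/1 column families" entries here show. The closest client-side equivalent would be an explicit table flush; a sketch under that assumption (not something the test itself invokes at this point):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Forces memstore contents out to HFiles, analogous to the snapshot-driven flush above.
          admin.flush(TableName.valueOf("testtb-testExportExpiredSnapshot"));
        }
      }
    }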
2024-12-12T20:40:20,120 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(2837): Flushing 332c8ddf59d491e4debdd5c9e0310ba0 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-12T20:40:20,135 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/332c8ddf59d491e4debdd5c9e0310ba0/.tmp/cf/a838ef494839488d870cea55cb5a18a4 is 71, key is 15db16377d8ff060dcbc8bade1e85b22/cf:q/1734036019689/Put/seqid=0 2024-12-12T20:40:20,135 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/5205433f2a6c9b509c227daf8acaef17/.tmp/cf/867f6f6709e442e6be6267f2d54511c0 is 71, key is 0d30913e0b289ed438b1fb996e87b9e0/cf:q/1734036019684/Put/seqid=0 2024-12-12T20:40:20,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742172_1348 (size=5216) 2024-12-12T20:40:20,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742172_1348 (size=5216) 2024-12-12T20:40:20,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742172_1348 (size=5216) 2024-12-12T20:40:20,146 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/5205433f2a6c9b509c227daf8acaef17/.tmp/cf/867f6f6709e442e6be6267f2d54511c0 2024-12-12T20:40:20,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742173_1349 (size=8392) 2024-12-12T20:40:20,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742173_1349 (size=8392) 2024-12-12T20:40:20,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742173_1349 (size=8392) 2024-12-12T20:40:20,148 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/332c8ddf59d491e4debdd5c9e0310ba0/.tmp/cf/a838ef494839488d870cea55cb5a18a4 2024-12-12T20:40:20,152 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/5205433f2a6c9b509c227daf8acaef17/.tmp/cf/867f6f6709e442e6be6267f2d54511c0 as 
hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/5205433f2a6c9b509c227daf8acaef17/cf/867f6f6709e442e6be6267f2d54511c0 2024-12-12T20:40:20,155 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/332c8ddf59d491e4debdd5c9e0310ba0/.tmp/cf/a838ef494839488d870cea55cb5a18a4 as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/332c8ddf59d491e4debdd5c9e0310ba0/cf/a838ef494839488d870cea55cb5a18a4 2024-12-12T20:40:20,164 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/332c8ddf59d491e4debdd5c9e0310ba0/cf/a838ef494839488d870cea55cb5a18a4, entries=48, sequenceid=6, filesize=8.2 K 2024-12-12T20:40:20,166 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(3040): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 332c8ddf59d491e4debdd5c9e0310ba0 in 47ms, sequenceid=6, compaction requested=false 2024-12-12T20:40:20,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(2538): Flush status journal for 332c8ddf59d491e4debdd5c9e0310ba0: 2024-12-12T20:40:20,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0. for snaptb0-testExportExpiredSnapshot completed. 2024-12-12T20:40:20,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-12T20:40:20,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:40:20,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/332c8ddf59d491e4debdd5c9e0310ba0/cf/a838ef494839488d870cea55cb5a18a4] hfiles 2024-12-12T20:40:20,167 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/332c8ddf59d491e4debdd5c9e0310ba0/cf/a838ef494839488d870cea55cb5a18a4 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-12T20:40:20,168 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/5205433f2a6c9b509c227daf8acaef17/cf/867f6f6709e442e6be6267f2d54511c0, entries=2, sequenceid=6, filesize=5.1 K 2024-12-12T20:40:20,169 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 5205433f2a6c9b509c227daf8acaef17 in 51ms, sequenceid=6, compaction requested=false 2024-12-12T20:40:20,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for 5205433f2a6c9b509c227daf8acaef17: 2024-12-12T20:40:20,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17. for snaptb0-testExportExpiredSnapshot completed. 2024-12-12T20:40:20,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-12T20:40:20,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:40:20,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/5205433f2a6c9b509c227daf8acaef17/cf/867f6f6709e442e6be6267f2d54511c0] hfiles 2024-12-12T20:40:20,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/5205433f2a6c9b509c227daf8acaef17/cf/867f6f6709e442e6be6267f2d54511c0 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-12T20:40:20,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742174_1350 (size=110) 2024-12-12T20:40:20,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742174_1350 (size=110) 2024-12-12T20:40:20,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742174_1350 (size=110) 2024-12-12T20:40:20,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0. 2024-12-12T20:40:20,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=146 2024-12-12T20:40:20,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=146 2024-12-12T20:40:20,202 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 332c8ddf59d491e4debdd5c9e0310ba0 2024-12-12T20:40:20,202 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 332c8ddf59d491e4debdd5c9e0310ba0 2024-12-12T20:40:20,204 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=144, state=SUCCESS; SnapshotRegionProcedure 332c8ddf59d491e4debdd5c9e0310ba0 in 252 msec 2024-12-12T20:40:20,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742175_1351 (size=110) 2024-12-12T20:40:20,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742175_1351 (size=110) 2024-12-12T20:40:20,213 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17. 
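[Editor's note] The SnapshotManifest entries above record the region info and the single hfile reference per region for snaptb0-testExportExpiredSnapshot. Once SnapshotProcedure 144 finishes (see the entries that follow), a client could confirm that both snapshots exist by listing them; a minimal, illustrative sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListSnapshots {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          for (SnapshotDescription sd : admin.listSnapshots()) {
            // Expected to include emptySnaptb0-testExportExpiredSnapshot and snaptb0-testExportExpiredSnapshot.
            System.out.println(sd.getName());
          }
        }
      }
    }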
2024-12-12T20:40:20,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742175_1351 (size=110) 2024-12-12T20:40:20,214 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-12-12T20:40:20,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-12-12T20:40:20,214 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 5205433f2a6c9b509c227daf8acaef17 2024-12-12T20:40:20,214 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 5205433f2a6c9b509c227daf8acaef17 2024-12-12T20:40:20,216 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-12-12T20:40:20,216 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T20:40:20,216 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; SnapshotRegionProcedure 5205433f2a6c9b509c227daf8acaef17 in 264 msec 2024-12-12T20:40:20,217 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T20:40:20,217 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T20:40:20,217 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-12-12T20:40:20,218 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-12-12T20:40:20,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742176_1352 (size=630) 2024-12-12T20:40:20,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742176_1352 (size=630) 2024-12-12T20:40:20,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742176_1352 (size=630) 2024-12-12T20:40:20,236 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ 
ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T20:40:20,242 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T20:40:20,243 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-12-12T20:40:20,244 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T20:40:20,244 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 144 2024-12-12T20:40:20,246 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 435 msec 2024-12-12T20:40:20,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-12T20:40:20,420 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot, procId: 144 completed 2024-12-12T20:40:20,421 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T20:40:20,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=147, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testExportExpiredSnapshot 2024-12-12T20:40:20,422 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T20:40:20,423 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:40:20,423 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: 
namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 147 2024-12-12T20:40:20,423 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T20:40:20,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-12T20:40:20,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742177_1353 (size=400) 2024-12-12T20:40:20,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742177_1353 (size=400) 2024-12-12T20:40:20,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742177_1353 (size=400) 2024-12-12T20:40:20,437 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 1c36c2e04ccc3d3187435c7e0ba7d6c6, NAME => 'testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:40:20,438 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => d5a17aa0c520a1cdf2abb50af9af5441, NAME => 'testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:40:20,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742178_1354 (size=61) 2024-12-12T20:40:20,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742178_1354 (size=61) 2024-12-12T20:40:20,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742178_1354 (size=61) 2024-12-12T20:40:20,453 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now 
disable 2024-12-12T20:40:20,454 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1681): Closing 1c36c2e04ccc3d3187435c7e0ba7d6c6, disabling compactions & flushes 2024-12-12T20:40:20,454 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6. 2024-12-12T20:40:20,454 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6. 2024-12-12T20:40:20,454 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6. after waiting 0 ms 2024-12-12T20:40:20,454 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6. 2024-12-12T20:40:20,454 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6. 2024-12-12T20:40:20,454 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1635): Region close journal for 1c36c2e04ccc3d3187435c7e0ba7d6c6: 2024-12-12T20:40:20,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742179_1355 (size=61) 2024-12-12T20:40:20,458 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:40:20,458 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1681): Closing d5a17aa0c520a1cdf2abb50af9af5441, disabling compactions & flushes 2024-12-12T20:40:20,458 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441. 2024-12-12T20:40:20,458 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441. 2024-12-12T20:40:20,458 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441. after waiting 0 ms 2024-12-12T20:40:20,458 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441. 2024-12-12T20:40:20,458 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441. 
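The CreateTableProcedure above builds 'testExportExpiredSnapshot' with a single family 'cf' and two regions split at row key '1' (STARTKEY '' to '1', and '1' to ''). A minimal sketch of issuing an equivalent create through the client API follows; the table and family names are taken from the log, while the connection setup is an assumption and not part of the test itself.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreatePreSplitTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testExportExpiredSnapshot"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)   // VERSIONS => '1' in the descriptor logged above
              .build())
          .build();
      // One split key yields the two regions seen in the log:
      // ['' , '1') handled by one region server and ['1', '') by another.
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}
```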
2024-12-12T20:40:20,458 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1635): Region close journal for d5a17aa0c520a1cdf2abb50af9af5441: 2024-12-12T20:40:20,459 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T20:40:20,460 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1734036020459"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734036020459"}]},"ts":"1734036020459"} 2024-12-12T20:40:20,460 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1734036020459"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734036020459"}]},"ts":"1734036020459"} 2024-12-12T20:40:20,461 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-12T20:40:20,462 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T20:40:20,462 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734036020462"}]},"ts":"1734036020462"} 2024-12-12T20:40:20,464 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-12T20:40:20,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742179_1355 (size=61) 2024-12-12T20:40:20,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742179_1355 (size=61) 2024-12-12T20:40:20,507 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {0a737713f6e3=0} racks are {/default-rack=0} 2024-12-12T20:40:20,512 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-12T20:40:20,512 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-12T20:40:20,512 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-12T20:40:20,512 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-12T20:40:20,512 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-12T20:40:20,512 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-12T20:40:20,512 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T20:40:20,513 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=1c36c2e04ccc3d3187435c7e0ba7d6c6, ASSIGN}, {pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure 
table=testExportExpiredSnapshot, region=d5a17aa0c520a1cdf2abb50af9af5441, ASSIGN}] 2024-12-12T20:40:20,515 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=1c36c2e04ccc3d3187435c7e0ba7d6c6, ASSIGN 2024-12-12T20:40:20,516 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d5a17aa0c520a1cdf2abb50af9af5441, ASSIGN 2024-12-12T20:40:20,517 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=1c36c2e04ccc3d3187435c7e0ba7d6c6, ASSIGN; state=OFFLINE, location=0a737713f6e3,36817,1734035782356; forceNewPlan=false, retain=false 2024-12-12T20:40:20,517 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d5a17aa0c520a1cdf2abb50af9af5441, ASSIGN; state=OFFLINE, location=0a737713f6e3,42407,1734035781742; forceNewPlan=false, retain=false 2024-12-12T20:40:20,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-12T20:40:20,668 INFO [0a737713f6e3:43067 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-12T20:40:20,669 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=149 updating hbase:meta row=d5a17aa0c520a1cdf2abb50af9af5441, regionState=OPENING, regionLocation=0a737713f6e3,42407,1734035781742 2024-12-12T20:40:20,669 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=1c36c2e04ccc3d3187435c7e0ba7d6c6, regionState=OPENING, regionLocation=0a737713f6e3,36817,1734035782356 2024-12-12T20:40:20,682 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE; OpenRegionProcedure d5a17aa0c520a1cdf2abb50af9af5441, server=0a737713f6e3,42407,1734035781742}] 2024-12-12T20:40:20,696 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=148, state=RUNNABLE; OpenRegionProcedure 1c36c2e04ccc3d3187435c7e0ba7d6c6, server=0a737713f6e3,36817,1734035782356}] 2024-12-12T20:40:20,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-12T20:40:20,851 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:40:20,859 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] handler.AssignRegionHandler(135): Open testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441. 
2024-12-12T20:40:20,860 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7285): Opening region: {ENCODED => d5a17aa0c520a1cdf2abb50af9af5441, NAME => 'testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441.', STARTKEY => '1', ENDKEY => ''} 2024-12-12T20:40:20,860 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441. service=AccessControlService 2024-12-12T20:40:20,860 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T20:40:20,860 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot d5a17aa0c520a1cdf2abb50af9af5441 2024-12-12T20:40:20,861 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:40:20,861 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7327): checking encryption for d5a17aa0c520a1cdf2abb50af9af5441 2024-12-12T20:40:20,861 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7330): checking classloading for d5a17aa0c520a1cdf2abb50af9af5441 2024-12-12T20:40:20,862 INFO [StoreOpener-d5a17aa0c520a1cdf2abb50af9af5441-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d5a17aa0c520a1cdf2abb50af9af5441 2024-12-12T20:40:20,865 INFO [StoreOpener-d5a17aa0c520a1cdf2abb50af9af5441-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d5a17aa0c520a1cdf2abb50af9af5441 columnFamilyName cf 2024-12-12T20:40:20,865 DEBUG [StoreOpener-d5a17aa0c520a1cdf2abb50af9af5441-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:40:20,869 INFO [StoreOpener-d5a17aa0c520a1cdf2abb50af9af5441-1 {}] regionserver.HStore(327): Store=d5a17aa0c520a1cdf2abb50af9af5441/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=NONE, compression=NONE 2024-12-12T20:40:20,870 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:40:20,873 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportExpiredSnapshot/d5a17aa0c520a1cdf2abb50af9af5441 2024-12-12T20:40:20,876 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportExpiredSnapshot/d5a17aa0c520a1cdf2abb50af9af5441 2024-12-12T20:40:20,878 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(135): Open testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6. 2024-12-12T20:40:20,878 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7285): Opening region: {ENCODED => 1c36c2e04ccc3d3187435c7e0ba7d6c6, NAME => 'testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6.', STARTKEY => '', ENDKEY => '1'} 2024-12-12T20:40:20,879 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6. service=AccessControlService 2024-12-12T20:40:20,879 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
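The CompactionConfiguration line above is the store echoing its effective compaction settings when it opens. The numbers it prints come from standard HBase configuration keys; the sketch below reads them from whatever hbase-site.xml is on the classpath (the defaults shown as fallbacks match the values logged: min 3 files, max 10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ShowCompactionSettings {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Keys behind the "CompactionConfiguration(181)" line logged above.
    System.out.println("min files to compact : " + conf.getInt("hbase.hstore.compaction.min", 3));
    System.out.println("max files to compact : " + conf.getInt("hbase.hstore.compaction.max", 10));
    System.out.println("compaction ratio     : " + conf.getFloat("hbase.hstore.compaction.ratio", 1.2F));
    System.out.println("off-peak ratio       : " + conf.getFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F));
    System.out.println("major period (ms)    : " + conf.getLong("hbase.hregion.majorcompaction", 604800000L));
    System.out.println("major jitter         : " + conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.5F));
  }
}
```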
2024-12-12T20:40:20,879 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 1c36c2e04ccc3d3187435c7e0ba7d6c6 2024-12-12T20:40:20,879 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:40:20,880 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7327): checking encryption for 1c36c2e04ccc3d3187435c7e0ba7d6c6 2024-12-12T20:40:20,880 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7330): checking classloading for 1c36c2e04ccc3d3187435c7e0ba7d6c6 2024-12-12T20:40:20,887 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1085): writing seq id for d5a17aa0c520a1cdf2abb50af9af5441 2024-12-12T20:40:20,889 INFO [StoreOpener-1c36c2e04ccc3d3187435c7e0ba7d6c6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1c36c2e04ccc3d3187435c7e0ba7d6c6 2024-12-12T20:40:20,894 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportExpiredSnapshot/d5a17aa0c520a1cdf2abb50af9af5441/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:40:20,894 INFO [StoreOpener-1c36c2e04ccc3d3187435c7e0ba7d6c6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1c36c2e04ccc3d3187435c7e0ba7d6c6 columnFamilyName cf 2024-12-12T20:40:20,894 DEBUG [StoreOpener-1c36c2e04ccc3d3187435c7e0ba7d6c6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:40:20,899 INFO [StoreOpener-1c36c2e04ccc3d3187435c7e0ba7d6c6-1 {}] regionserver.HStore(327): Store=1c36c2e04ccc3d3187435c7e0ba7d6c6/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:40:20,899 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1102): Opened d5a17aa0c520a1cdf2abb50af9af5441; next sequenceid=2; 
SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71582999, jitterRate=0.06666980683803558}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:40:20,900 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportExpiredSnapshot/1c36c2e04ccc3d3187435c7e0ba7d6c6 2024-12-12T20:40:20,900 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1001): Region open journal for d5a17aa0c520a1cdf2abb50af9af5441: 2024-12-12T20:40:20,901 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportExpiredSnapshot/1c36c2e04ccc3d3187435c7e0ba7d6c6 2024-12-12T20:40:20,907 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1085): writing seq id for 1c36c2e04ccc3d3187435c7e0ba7d6c6 2024-12-12T20:40:20,907 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441., pid=150, masterSystemTime=1734036020851 2024-12-12T20:40:20,911 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportExpiredSnapshot/1c36c2e04ccc3d3187435c7e0ba7d6c6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:40:20,912 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441. 2024-12-12T20:40:20,912 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] handler.AssignRegionHandler(164): Opened testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441. 
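The split-policy line above (SteppingSplitPolicy wrapping IncreasingToUpperBoundRegionSplitPolicy) prints the effective desiredMaxFileSize: 71582999 bytes is consistent with a 64 MB base max file size multiplied by the logged jitterRate of about 0.0667, so the test configuration evidently uses a much smaller region size than the stock default. The keys involved are shown in this small sketch; values printed locally will reflect your own configuration, not the test's.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ShowSplitPolicyConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Default policy in 2.x is SteppingSplitPolicy; the max file size feeds the
    // ConstantSizeRegionSplitPolicy.desiredMaxFileSize value seen in the log (plus jitter).
    System.out.println("split policy : "
        + conf.get("hbase.regionserver.region.split.policy", "SteppingSplitPolicy"));
    System.out.println("max filesize : "
        + conf.getLong("hbase.hregion.max.filesize", 10737418240L));
  }
}
```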
2024-12-12T20:40:20,912 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1102): Opened 1c36c2e04ccc3d3187435c7e0ba7d6c6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74806896, jitterRate=0.11470961570739746}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:40:20,912 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1001): Region open journal for 1c36c2e04ccc3d3187435c7e0ba7d6c6: 2024-12-12T20:40:20,913 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6., pid=151, masterSystemTime=1734036020870 2024-12-12T20:40:20,916 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=149 updating hbase:meta row=d5a17aa0c520a1cdf2abb50af9af5441, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,42407,1734035781742 2024-12-12T20:40:20,916 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6. 2024-12-12T20:40:20,916 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(164): Opened testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6. 2024-12-12T20:40:20,920 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=1c36c2e04ccc3d3187435c7e0ba7d6c6, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,36817,1734035782356 2024-12-12T20:40:20,924 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43067 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=OPEN, location=0a737713f6e3,36817,1734035782356, table=testExportExpiredSnapshot, region=1c36c2e04ccc3d3187435c7e0ba7d6c6. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
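Once OpenRegionProcedure 150 and 151 complete, both regions report OPEN with openSeqNum=2 and their locations are written back to hbase:meta. A client can confirm the same assignment through RegionLocator, as in this minimal sketch (connection setup assumed, same caveat as above):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ListRegionLocations {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("testExportExpiredSnapshot"))) {
      // Prints each region's encoded name and the region server hosting it, i.e. the
      // OPEN locations that the TransitRegionStateProcedure entries above converge on.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}
```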
2024-12-12T20:40:20,932 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149 2024-12-12T20:40:20,932 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; OpenRegionProcedure d5a17aa0c520a1cdf2abb50af9af5441, server=0a737713f6e3,42407,1734035781742 in 241 msec 2024-12-12T20:40:20,939 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=148 2024-12-12T20:40:20,944 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d5a17aa0c520a1cdf2abb50af9af5441, ASSIGN in 419 msec 2024-12-12T20:40:20,944 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=148, state=SUCCESS; OpenRegionProcedure 1c36c2e04ccc3d3187435c7e0ba7d6c6, server=0a737713f6e3,36817,1734035782356 in 244 msec 2024-12-12T20:40:20,952 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=148, resume processing ppid=147 2024-12-12T20:40:20,952 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=1c36c2e04ccc3d3187435c7e0ba7d6c6, ASSIGN in 426 msec 2024-12-12T20:40:20,953 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T20:40:20,954 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734036020953"}]},"ts":"1734036020953"} 2024-12-12T20:40:20,958 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-12T20:40:20,984 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T20:40:20,985 DEBUG [PEWorker-2 {}] access.PermissionStorage(175): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-12-12T20:40:20,987 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46859 {}] access.PermissionStorage(611): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-12T20:40:21,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:21,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:21,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:21,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:21,009 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:21,016 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:21,016 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:21,016 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:21,016 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:21,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportExpiredSnapshot 2024-12-12T20:40:21,019 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, state=SUCCESS; CreateTableProcedure table=testExportExpiredSnapshot in 595 msec 2024-12-12T20:40:21,021 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:21,022 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:21,022 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:21,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-12T20:40:21,030 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testExportExpiredSnapshot, procId: 147 completed 2024-12-12T20:40:21,030 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testExportExpiredSnapshot get assigned. 
Timeout = 60000ms 2024-12-12T20:40:21,030 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:40:21,035 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-12-12T20:40:21,035 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:40:21,035 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testExportExpiredSnapshot assigned. 2024-12-12T20:40:21,051 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36817 {}] regionserver.HRegion(8254): writing data to region testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T20:40:21,052 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42407 {}] regionserver.HRegion(8254): writing data to region testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T20:40:21,058 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testExportExpiredSnapshot 2024-12-12T20:40:21,058 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6. 2024-12-12T20:40:21,059 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:40:21,076 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-12T20:40:21,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-12T20:40:21,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-12T20:40:21,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4f51a7a6 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@77a6fd92 2024-12-12T20:40:21,106 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0006_000001 (auth:SIMPLE) from 127.0.0.1:52238 2024-12-12T20:40:21,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67a08d58, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:40:21,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:40:21,115 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50212, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:40:21,117 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4f51a7a6 to 127.0.0.1:52655 2024-12-12T20:40:21,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:40:21,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72c528f8 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@324f115f 2024-12-12T20:40:21,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-12T20:40:21,126 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-12T20:40:21,127 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-12T20:40:21,127 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-12T20:40:21,128 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T20:40:21,128 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T20:40:21,132 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_0/usercache/jenkins/appcache/application_1734035798001_0006/container_1734035798001_0006_01_000001/launch_container.sh] 2024-12-12T20:40:21,132 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_0/usercache/jenkins/appcache/application_1734035798001_0006/container_1734035798001_0006_01_000001/container_tokens] 2024-12-12T20:40:21,132 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_0/usercache/jenkins/appcache/application_1734035798001_0006/container_1734035798001_0006_01_000001/sysfs] 2024-12-12T20:40:21,167 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51e69003, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:40:21,179 DEBUG [hconnection-0x3fcd3a8e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:40:21,182 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50220, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:40:21,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72c528f8 to 127.0.0.1:52655 2024-12-12T20:40:21,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:40:21,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(611): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-12T20:40:21,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-12T20:40:21,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-12T20:40:21,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 152 2024-12-12T20:40:21,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-12T20:40:21,229 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-12-12T20:40:21,231 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T20:40:21,238 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T20:40:21,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742180_1356 (size=152) 2024-12-12T20:40:21,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742180_1356 
(size=152) 2024-12-12T20:40:21,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742180_1356 (size=152) 2024-12-12T20:40:21,324 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T20:40:21,324 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 1c36c2e04ccc3d3187435c7e0ba7d6c6}, {pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure d5a17aa0c520a1cdf2abb50af9af5441}] 2024-12-12T20:40:21,325 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure d5a17aa0c520a1cdf2abb50af9af5441 2024-12-12T20:40:21,326 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 1c36c2e04ccc3d3187435c7e0ba7d6c6 2024-12-12T20:40:21,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-12T20:40:21,483 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:40:21,483 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:40:21,483 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42407 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=154 2024-12-12T20:40:21,483 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36817 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=153 2024-12-12T20:40:21,483 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6. 2024-12-12T20:40:21,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441. 
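Just before SnapshotRegionProcedure 153 and 154 start, the test writes rows "with WAL disabled" and then requests snapshot-testExportExpiredSnapshot with type=FLUSH and ttl=10. The sketch below shows both calls from the client side; the row key and value are made up for illustration, and the SnapshotDescription constructor that accepts a properties map with a "TTL" entry (TTL in seconds) is an assumption about recent 2.x clients, so treat it as a sketch rather than the test's exact code.

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteThenTtlSnapshot {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("testExportExpiredSnapshot");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      // Corresponds to the "writing data to region ... with WAL disabled" warnings above:
      // SKIP_WAL durability trades crash safety for write speed.
      Put put = new Put(Bytes.toBytes("row-1"));                       // hypothetical row
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);

      // ttl=10 in the snapshot request above is a per-snapshot TTL in seconds,
      // passed here via the snapshot properties map (assumed "TTL" key).
      Map<String, Object> props = new HashMap<>();
      props.put("TTL", 10L);
      admin.snapshot(new SnapshotDescription("snapshot-testExportExpiredSnapshot", tn,
          SnapshotType.FLUSH, props));
    }
  }
}
```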
2024-12-12T20:40:21,484 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(2837): Flushing d5a17aa0c520a1cdf2abb50af9af5441 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-12T20:40:21,487 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(2837): Flushing 1c36c2e04ccc3d3187435c7e0ba7d6c6 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-12T20:40:21,521 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportExpiredSnapshot/d5a17aa0c520a1cdf2abb50af9af5441/.tmp/cf/86f9af6cdbf94c2381f59920d07359fb is 71, key is 1140095859477ddacb25ebcf246706f0/cf:q/1734036021052/Put/seqid=0 2024-12-12T20:40:21,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-12T20:40:21,533 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportExpiredSnapshot/1c36c2e04ccc3d3187435c7e0ba7d6c6/.tmp/cf/4f6a2fe4e70a4fbbba4486c600d48efa is 71, key is 07e87537bd33dd120d64bae4e22ed931/cf:q/1734036021051/Put/seqid=0 2024-12-12T20:40:21,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742181_1357 (size=8324) 2024-12-12T20:40:21,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742181_1357 (size=8324) 2024-12-12T20:40:21,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742181_1357 (size=8324) 2024-12-12T20:40:21,568 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportExpiredSnapshot/d5a17aa0c520a1cdf2abb50af9af5441/.tmp/cf/86f9af6cdbf94c2381f59920d07359fb 2024-12-12T20:40:21,575 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportExpiredSnapshot/d5a17aa0c520a1cdf2abb50af9af5441/.tmp/cf/86f9af6cdbf94c2381f59920d07359fb as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportExpiredSnapshot/d5a17aa0c520a1cdf2abb50af9af5441/cf/86f9af6cdbf94c2381f59920d07359fb 2024-12-12T20:40:21,581 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportExpiredSnapshot/d5a17aa0c520a1cdf2abb50af9af5441/cf/86f9af6cdbf94c2381f59920d07359fb, entries=47, sequenceid=5, filesize=8.1 K 2024-12-12T20:40:21,582 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(3040): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for d5a17aa0c520a1cdf2abb50af9af5441 in 98ms, sequenceid=5, compaction requested=false 2024-12-12T20:40:21,582 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-12T20:40:21,584 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(2538): Flush status journal for d5a17aa0c520a1cdf2abb50af9af5441: 2024-12-12T20:40:21,584 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441. for snapshot-testExportExpiredSnapshot completed. 2024-12-12T20:40:21,584 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-12T20:40:21,584 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:40:21,584 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportExpiredSnapshot/d5a17aa0c520a1cdf2abb50af9af5441/cf/86f9af6cdbf94c2381f59920d07359fb] hfiles 2024-12-12T20:40:21,584 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportExpiredSnapshot/d5a17aa0c520a1cdf2abb50af9af5441/cf/86f9af6cdbf94c2381f59920d07359fb for snapshot=snapshot-testExportExpiredSnapshot 2024-12-12T20:40:21,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742182_1358 (size=5288) 2024-12-12T20:40:21,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742182_1358 (size=5288) 2024-12-12T20:40:21,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742182_1358 (size=5288) 2024-12-12T20:40:21,619 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportExpiredSnapshot/1c36c2e04ccc3d3187435c7e0ba7d6c6/.tmp/cf/4f6a2fe4e70a4fbbba4486c600d48efa 2024-12-12T20:40:21,630 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportExpiredSnapshot/1c36c2e04ccc3d3187435c7e0ba7d6c6/.tmp/cf/4f6a2fe4e70a4fbbba4486c600d48efa as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportExpiredSnapshot/1c36c2e04ccc3d3187435c7e0ba7d6c6/cf/4f6a2fe4e70a4fbbba4486c600d48efa 2024-12-12T20:40:21,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742183_1359 (size=103) 2024-12-12T20:40:21,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742183_1359 (size=103) 2024-12-12T20:40:21,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742183_1359 (size=103) 2024-12-12T20:40:21,638 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441. 2024-12-12T20:40:21,639 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=154 2024-12-12T20:40:21,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=154 2024-12-12T20:40:21,639 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region d5a17aa0c520a1cdf2abb50af9af5441 2024-12-12T20:40:21,639 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure d5a17aa0c520a1cdf2abb50af9af5441 2024-12-12T20:40:21,647 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportExpiredSnapshot/1c36c2e04ccc3d3187435c7e0ba7d6c6/cf/4f6a2fe4e70a4fbbba4486c600d48efa, entries=3, sequenceid=5, filesize=5.2 K 2024-12-12T20:40:21,650 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=152, state=SUCCESS; SnapshotRegionProcedure d5a17aa0c520a1cdf2abb50af9af5441 in 319 msec 2024-12-12T20:40:21,651 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 1c36c2e04ccc3d3187435c7e0ba7d6c6 in 164ms, sequenceid=5, compaction requested=false 2024-12-12T20:40:21,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(2538): Flush status journal for 1c36c2e04ccc3d3187435c7e0ba7d6c6: 2024-12-12T20:40:21,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6. for snapshot-testExportExpiredSnapshot completed. 
2024-12-12T20:40:21,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-12T20:40:21,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:40:21,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportExpiredSnapshot/1c36c2e04ccc3d3187435c7e0ba7d6c6/cf/4f6a2fe4e70a4fbbba4486c600d48efa] hfiles 2024-12-12T20:40:21,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportExpiredSnapshot/1c36c2e04ccc3d3187435c7e0ba7d6c6/cf/4f6a2fe4e70a4fbbba4486c600d48efa for snapshot=snapshot-testExportExpiredSnapshot 2024-12-12T20:40:21,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742184_1360 (size=103) 2024-12-12T20:40:21,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742184_1360 (size=103) 2024-12-12T20:40:21,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742184_1360 (size=103) 2024-12-12T20:40:21,690 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6. 
2024-12-12T20:40:21,690 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=153 2024-12-12T20:40:21,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=153 2024-12-12T20:40:21,691 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 1c36c2e04ccc3d3187435c7e0ba7d6c6 2024-12-12T20:40:21,691 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 1c36c2e04ccc3d3187435c7e0ba7d6c6 2024-12-12T20:40:21,698 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-12-12T20:40:21,698 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T20:40:21,698 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; SnapshotRegionProcedure 1c36c2e04ccc3d3187435c7e0ba7d6c6 in 371 msec 2024-12-12T20:40:21,699 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T20:40:21,701 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T20:40:21,701 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-12-12T20:40:21,702 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-12T20:40:21,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742185_1361 (size=609) 2024-12-12T20:40:21,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742185_1361 (size=609) 2024-12-12T20:40:21,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742185_1361 (size=609) 2024-12-12T20:40:21,723 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T20:40:21,737 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): 
pid=152, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T20:40:21,742 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-12-12T20:40:21,751 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T20:40:21,752 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 152 2024-12-12T20:40:21,765 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 529 msec 2024-12-12T20:40:21,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-12T20:40:21,840 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot, procId: 152 completed 2024-12-12T20:40:22,569 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T20:40:27,808 DEBUG [master/0a737713f6e3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 1c36c2e04ccc3d3187435c7e0ba7d6c6 changed from -1.0 to 0.0, refreshing cache 2024-12-12T20:40:27,808 DEBUG [master/0a737713f6e3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region d5a17aa0c520a1cdf2abb50af9af5441 changed from -1.0 to 0.0, refreshing cache 2024-12-12T20:40:27,808 DEBUG [master/0a737713f6e3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 332c8ddf59d491e4debdd5c9e0310ba0 changed from -1.0 to 0.0, refreshing cache 2024-12-12T20:40:27,808 DEBUG [master/0a737713f6e3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 5205433f2a6c9b509c227daf8acaef17 changed from -1.0 to 0.0, refreshing cache 2024-12-12T20:40:31,859 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036031859 2024-12-12T20:40:31,859 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:37615, tgtDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036031859, rawTgtDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036031859, srcFsUri=hdfs://localhost:37615, 
srcDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427
2024-12-12T20:40:31,890 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:37615, inputRoot=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427
2024-12-12T20:40:31,890 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1165987477_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036031859, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036031859/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot
2024-12-12T20:40:31,892 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity.
2024-12-12T20:40:31,894 ERROR [Time-limited test {}] util.AbstractHBaseTool(153): Error running command-line tool
org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired.
	at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:948) ~[classes/:?]
	at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1093) ~[classes/:?]
	at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:151) ~[hbase-common-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:523) ~[test-classes/:?]
	at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:315) ~[test-classes/:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
	at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T20:40:31,895 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportExpiredSnapshot 2024-12-12T20:40:31,895 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-12-12T20:40:31,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-12T20:40:31,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-12T20:40:31,907 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734036031907"}]},"ts":"1734036031907"} 2024-12-12T20:40:31,912 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-12-12T20:40:31,923 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-12-12T20:40:31,924 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-12-12T20:40:31,926 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=5205433f2a6c9b509c227daf8acaef17, UNASSIGN}, {pid=158, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=332c8ddf59d491e4debdd5c9e0310ba0, UNASSIGN}] 2024-12-12T20:40:31,926 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=157, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=5205433f2a6c9b509c227daf8acaef17, UNASSIGN 2024-12-12T20:40:31,926 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=158, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=332c8ddf59d491e4debdd5c9e0310ba0, UNASSIGN 2024-12-12T20:40:31,927 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=332c8ddf59d491e4debdd5c9e0310ba0, regionState=CLOSING, regionLocation=0a737713f6e3,42407,1734035781742 2024-12-12T20:40:31,927 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=157 updating hbase:meta row=5205433f2a6c9b509c227daf8acaef17, regionState=CLOSING, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:40:31,928 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T20:40:31,928 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE; CloseRegionProcedure 332c8ddf59d491e4debdd5c9e0310ba0, server=0a737713f6e3,42407,1734035781742}] 2024-12-12T20:40:31,929 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: 
false 2024-12-12T20:40:31,929 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=157, state=RUNNABLE; CloseRegionProcedure 5205433f2a6c9b509c227daf8acaef17, server=0a737713f6e3,46859,1734035782031}] 2024-12-12T20:40:32,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-12T20:40:32,080 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:40:32,080 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(124): Close 332c8ddf59d491e4debdd5c9e0310ba0 2024-12-12T20:40:32,080 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T20:40:32,080 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1681): Closing 332c8ddf59d491e4debdd5c9e0310ba0, disabling compactions & flushes 2024-12-12T20:40:32,080 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0. 2024-12-12T20:40:32,080 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0. 2024-12-12T20:40:32,080 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0. after waiting 0 ms 2024-12-12T20:40:32,081 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0. 2024-12-12T20:40:32,081 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:40:32,083 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(124): Close 5205433f2a6c9b509c227daf8acaef17 2024-12-12T20:40:32,083 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T20:40:32,083 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1681): Closing 5205433f2a6c9b509c227daf8acaef17, disabling compactions & flushes 2024-12-12T20:40:32,083 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17. 2024-12-12T20:40:32,084 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17. 
2024-12-12T20:40:32,084 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17. after waiting 0 ms 2024-12-12T20:40:32,084 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17. 2024-12-12T20:40:32,088 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/5205433f2a6c9b509c227daf8acaef17/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T20:40:32,089 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:40:32,090 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17. 2024-12-12T20:40:32,090 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1635): Region close journal for 5205433f2a6c9b509c227daf8acaef17: 2024-12-12T20:40:32,090 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/332c8ddf59d491e4debdd5c9e0310ba0/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T20:40:32,091 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:40:32,091 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0. 
2024-12-12T20:40:32,091 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1635): Region close journal for 332c8ddf59d491e4debdd5c9e0310ba0: 2024-12-12T20:40:32,093 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(170): Closed 5205433f2a6c9b509c227daf8acaef17 2024-12-12T20:40:32,093 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=157 updating hbase:meta row=5205433f2a6c9b509c227daf8acaef17, regionState=CLOSED 2024-12-12T20:40:32,096 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(170): Closed 332c8ddf59d491e4debdd5c9e0310ba0 2024-12-12T20:40:32,097 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=332c8ddf59d491e4debdd5c9e0310ba0, regionState=CLOSED 2024-12-12T20:40:32,101 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=157 2024-12-12T20:40:32,102 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=158 2024-12-12T20:40:32,102 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=157, state=SUCCESS; CloseRegionProcedure 5205433f2a6c9b509c227daf8acaef17, server=0a737713f6e3,46859,1734035782031 in 169 msec 2024-12-12T20:40:32,102 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=158, state=SUCCESS; CloseRegionProcedure 332c8ddf59d491e4debdd5c9e0310ba0, server=0a737713f6e3,42407,1734035781742 in 171 msec 2024-12-12T20:40:32,103 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=5205433f2a6c9b509c227daf8acaef17, UNASSIGN in 175 msec 2024-12-12T20:40:32,105 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=156 2024-12-12T20:40:32,105 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=156, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=332c8ddf59d491e4debdd5c9e0310ba0, UNASSIGN in 176 msec 2024-12-12T20:40:32,108 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-12-12T20:40:32,108 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 182 msec 2024-12-12T20:40:32,109 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734036032109"}]},"ts":"1734036032109"} 2024-12-12T20:40:32,116 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-12-12T20:40:32,147 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-12-12T20:40:32,153 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 257 msec 2024-12-12T20:40:32,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 
2024-12-12T20:40:32,205 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 155 completed 2024-12-12T20:40:32,206 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-12-12T20:40:32,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-12T20:40:32,207 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-12T20:40:32,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-12-12T20:40:32,208 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=161, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-12T20:40:32,209 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46859 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-12-12T20:40:32,212 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/5205433f2a6c9b509c227daf8acaef17 2024-12-12T20:40:32,213 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/332c8ddf59d491e4debdd5c9e0310ba0 2024-12-12T20:40:32,214 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/5205433f2a6c9b509c227daf8acaef17/cf, FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/5205433f2a6c9b509c227daf8acaef17/recovered.edits] 2024-12-12T20:40:32,215 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/332c8ddf59d491e4debdd5c9e0310ba0/cf, FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/332c8ddf59d491e4debdd5c9e0310ba0/recovered.edits] 2024-12-12T20:40:32,220 DEBUG [HFileArchiver-34 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/5205433f2a6c9b509c227daf8acaef17/cf/867f6f6709e442e6be6267f2d54511c0 to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportExpiredSnapshot/5205433f2a6c9b509c227daf8acaef17/cf/867f6f6709e442e6be6267f2d54511c0 2024-12-12T20:40:32,220 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/332c8ddf59d491e4debdd5c9e0310ba0/cf/a838ef494839488d870cea55cb5a18a4 to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportExpiredSnapshot/332c8ddf59d491e4debdd5c9e0310ba0/cf/a838ef494839488d870cea55cb5a18a4 2024-12-12T20:40:32,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-12T20:40:32,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-12T20:40:32,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-12T20:40:32,224 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-12T20:40:32,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-12T20:40:32,224 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/332c8ddf59d491e4debdd5c9e0310ba0/recovered.edits/9.seqid to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportExpiredSnapshot/332c8ddf59d491e4debdd5c9e0310ba0/recovered.edits/9.seqid 2024-12-12T20:40:32,225 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-12T20:40:32,225 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-12T20:40:32,225 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/5205433f2a6c9b509c227daf8acaef17/recovered.edits/9.seqid to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportExpiredSnapshot/5205433f2a6c9b509c227daf8acaef17/recovered.edits/9.seqid 2024-12-12T20:40:32,225 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-12T20:40:32,226 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/332c8ddf59d491e4debdd5c9e0310ba0 2024-12-12T20:40:32,226 DEBUG [HFileArchiver-29 {}] 
backup.HFileArchiver(634): Deleted hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportExpiredSnapshot/5205433f2a6c9b509c227daf8acaef17 2024-12-12T20:40:32,226 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-12-12T20:40:32,231 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=161, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-12T20:40:32,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-12T20:40:32,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-12T20:40:32,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:32,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:32,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-12T20:40:32,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:32,235 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-12-12T20:40:32,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-12T20:40:32,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:32,237 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:32,237 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:32,237 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:32,238 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-12-12T20:40:32,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-12T20:40:32,243 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=161, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-12T20:40:32,243 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportExpiredSnapshot' from region states. 2024-12-12T20:40:32,243 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734036032243"}]},"ts":"9223372036854775807"} 2024-12-12T20:40:32,243 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734036032243"}]},"ts":"9223372036854775807"} 2024-12-12T20:40:32,255 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:32,261 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-12T20:40:32,261 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 5205433f2a6c9b509c227daf8acaef17, NAME => 'testtb-testExportExpiredSnapshot,,1734036017804.5205433f2a6c9b509c227daf8acaef17.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 332c8ddf59d491e4debdd5c9e0310ba0, NAME => 'testtb-testExportExpiredSnapshot,1,1734036017804.332c8ddf59d491e4debdd5c9e0310ba0.', STARTKEY => '1', ENDKEY => ''}] 2024-12-12T20:40:32,261 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportExpiredSnapshot' as deleted. 
2024-12-12T20:40:32,261 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734036032261"}]},"ts":"9223372036854775807"} 2024-12-12T20:40:32,280 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportExpiredSnapshot state from META 2024-12-12T20:40:32,299 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=161, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-12T20:40:32,307 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 93 msec 2024-12-12T20:40:32,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-12T20:40:32,348 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 161 completed 2024-12-12T20:40:32,380 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" 2024-12-12T20:40:32,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-12-12T20:40:32,390 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" 2024-12-12T20:40:32,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotManager(380): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-12-12T20:40:32,407 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" 2024-12-12T20:40:32,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-12-12T20:40:32,455 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=800 (was 800), OpenFileDescriptor=793 (was 807), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1522 (was 1473) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 20), AvailableMemoryMB=3545 (was 2345) - AvailableMemoryMB LEAK? 
- 2024-12-12T20:40:32,455 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=800 is superior to 500 2024-12-12T20:40:32,477 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=800, OpenFileDescriptor=793, MaxFileDescriptor=1048576, SystemLoadAverage=1522, ProcessCount=11, AvailableMemoryMB=3544 2024-12-12T20:40:32,477 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=800 is superior to 500 2024-12-12T20:40:32,485 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T20:40:32,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=162, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-12T20:40:32,491 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T20:40:32,491 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:40:32,491 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 162 2024-12-12T20:40:32,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-12T20:40:32,495 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T20:40:32,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742186_1362 (size=412) 2024-12-12T20:40:32,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742186_1362 (size=412) 2024-12-12T20:40:32,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742186_1362 (size=412) 2024-12-12T20:40:32,524 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3351f5ee31a68057bf3e18961a3311ae, NAME => 'testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:40:32,525 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 0c7922ecb6828c8d91bd7dd83eb96ffb, NAME => 'testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:40:32,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742187_1363 (size=73) 2024-12-12T20:40:32,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742187_1363 (size=73) 2024-12-12T20:40:32,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742187_1363 (size=73) 2024-12-12T20:40:32,564 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:40:32,564 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1681): Closing 3351f5ee31a68057bf3e18961a3311ae, disabling compactions & flushes 2024-12-12T20:40:32,564 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae. 2024-12-12T20:40:32,564 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae. 2024-12-12T20:40:32,564 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae. after waiting 0 ms 2024-12-12T20:40:32,564 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae. 2024-12-12T20:40:32,564 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae. 
2024-12-12T20:40:32,564 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3351f5ee31a68057bf3e18961a3311ae: 2024-12-12T20:40:32,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742188_1364 (size=73) 2024-12-12T20:40:32,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742188_1364 (size=73) 2024-12-12T20:40:32,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742188_1364 (size=73) 2024-12-12T20:40:32,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-12T20:40:32,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-12T20:40:33,018 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:40:33,018 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1681): Closing 0c7922ecb6828c8d91bd7dd83eb96ffb, disabling compactions & flushes 2024-12-12T20:40:33,018 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb. 2024-12-12T20:40:33,018 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb. 2024-12-12T20:40:33,018 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb. after waiting 0 ms 2024-12-12T20:40:33,018 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb. 2024-12-12T20:40:33,018 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb. 
2024-12-12T20:40:33,018 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1635): Region close journal for 0c7922ecb6828c8d91bd7dd83eb96ffb: 2024-12-12T20:40:33,019 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T20:40:33,020 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1734036033019"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734036033019"}]},"ts":"1734036033019"} 2024-12-12T20:40:33,020 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1734036033019"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734036033019"}]},"ts":"1734036033019"} 2024-12-12T20:40:33,022 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-12T20:40:33,023 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T20:40:33,023 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734036033023"}]},"ts":"1734036033023"} 2024-12-12T20:40:33,024 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-12-12T20:40:33,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-12T20:40:33,255 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {0a737713f6e3=0} racks are {/default-rack=0} 2024-12-12T20:40:33,256 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-12T20:40:33,256 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-12T20:40:33,256 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-12T20:40:33,256 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-12T20:40:33,256 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-12T20:40:33,256 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-12T20:40:33,256 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T20:40:33,257 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3351f5ee31a68057bf3e18961a3311ae, ASSIGN}, {pid=164, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=0c7922ecb6828c8d91bd7dd83eb96ffb, ASSIGN}] 
2024-12-12T20:40:33,258 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=164, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=0c7922ecb6828c8d91bd7dd83eb96ffb, ASSIGN 2024-12-12T20:40:33,258 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3351f5ee31a68057bf3e18961a3311ae, ASSIGN 2024-12-12T20:40:33,258 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3351f5ee31a68057bf3e18961a3311ae, ASSIGN; state=OFFLINE, location=0a737713f6e3,42407,1734035781742; forceNewPlan=false, retain=false 2024-12-12T20:40:33,258 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=164, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=0c7922ecb6828c8d91bd7dd83eb96ffb, ASSIGN; state=OFFLINE, location=0a737713f6e3,46859,1734035782031; forceNewPlan=false, retain=false 2024-12-12T20:40:33,408 INFO [0a737713f6e3:43067 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-12T20:40:33,409 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=163 updating hbase:meta row=3351f5ee31a68057bf3e18961a3311ae, regionState=OPENING, regionLocation=0a737713f6e3,42407,1734035781742 2024-12-12T20:40:33,409 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=0c7922ecb6828c8d91bd7dd83eb96ffb, regionState=OPENING, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:40:33,410 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=165, ppid=164, state=RUNNABLE; OpenRegionProcedure 0c7922ecb6828c8d91bd7dd83eb96ffb, server=0a737713f6e3,46859,1734035782031}] 2024-12-12T20:40:33,410 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=163, state=RUNNABLE; OpenRegionProcedure 3351f5ee31a68057bf3e18961a3311ae, server=0a737713f6e3,42407,1734035781742}] 2024-12-12T20:40:33,562 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:40:33,562 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:40:33,564 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] handler.AssignRegionHandler(135): Open testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae. 
2024-12-12T20:40:33,564 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7285): Opening region: {ENCODED => 3351f5ee31a68057bf3e18961a3311ae, NAME => 'testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae.', STARTKEY => '', ENDKEY => '1'} 2024-12-12T20:40:33,564 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(135): Open testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb. 2024-12-12T20:40:33,564 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae. service=AccessControlService 2024-12-12T20:40:33,564 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7285): Opening region: {ENCODED => 0c7922ecb6828c8d91bd7dd83eb96ffb, NAME => 'testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb.', STARTKEY => '1', ENDKEY => ''} 2024-12-12T20:40:33,565 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T20:40:33,565 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb. service=AccessControlService 2024-12-12T20:40:33,565 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 3351f5ee31a68057bf3e18961a3311ae 2024-12-12T20:40:33,565 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:40:33,565 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-12T20:40:33,565 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7327): checking encryption for 3351f5ee31a68057bf3e18961a3311ae 2024-12-12T20:40:33,565 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7330): checking classloading for 3351f5ee31a68057bf3e18961a3311ae 2024-12-12T20:40:33,565 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 0c7922ecb6828c8d91bd7dd83eb96ffb 2024-12-12T20:40:33,565 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:40:33,565 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7327): checking encryption for 0c7922ecb6828c8d91bd7dd83eb96ffb 2024-12-12T20:40:33,565 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7330): checking classloading for 0c7922ecb6828c8d91bd7dd83eb96ffb 2024-12-12T20:40:33,566 INFO [StoreOpener-3351f5ee31a68057bf3e18961a3311ae-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3351f5ee31a68057bf3e18961a3311ae 2024-12-12T20:40:33,566 INFO [StoreOpener-0c7922ecb6828c8d91bd7dd83eb96ffb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0c7922ecb6828c8d91bd7dd83eb96ffb 2024-12-12T20:40:33,567 INFO [StoreOpener-3351f5ee31a68057bf3e18961a3311ae-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3351f5ee31a68057bf3e18961a3311ae columnFamilyName cf 2024-12-12T20:40:33,567 INFO [StoreOpener-0c7922ecb6828c8d91bd7dd83eb96ffb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0c7922ecb6828c8d91bd7dd83eb96ffb columnFamilyName cf 2024-12-12T20:40:33,567 DEBUG [StoreOpener-3351f5ee31a68057bf3e18961a3311ae-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:40:33,567 DEBUG [StoreOpener-0c7922ecb6828c8d91bd7dd83eb96ffb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:40:33,568 INFO [StoreOpener-3351f5ee31a68057bf3e18961a3311ae-1 {}] regionserver.HStore(327): Store=3351f5ee31a68057bf3e18961a3311ae/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:40:33,568 INFO [StoreOpener-0c7922ecb6828c8d91bd7dd83eb96ffb-1 {}] regionserver.HStore(327): Store=0c7922ecb6828c8d91bd7dd83eb96ffb/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:40:33,568 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/3351f5ee31a68057bf3e18961a3311ae 2024-12-12T20:40:33,568 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/0c7922ecb6828c8d91bd7dd83eb96ffb 2024-12-12T20:40:33,568 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/3351f5ee31a68057bf3e18961a3311ae 2024-12-12T20:40:33,569 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/0c7922ecb6828c8d91bd7dd83eb96ffb 2024-12-12T20:40:33,570 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1085): writing seq id for 3351f5ee31a68057bf3e18961a3311ae 2024-12-12T20:40:33,570 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1085): writing seq id for 0c7922ecb6828c8d91bd7dd83eb96ffb 2024-12-12T20:40:33,572 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/3351f5ee31a68057bf3e18961a3311ae/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:40:33,572 DEBUG 
[RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/0c7922ecb6828c8d91bd7dd83eb96ffb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:40:33,572 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1102): Opened 3351f5ee31a68057bf3e18961a3311ae; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74042343, jitterRate=0.10331688821315765}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:40:33,572 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1102): Opened 0c7922ecb6828c8d91bd7dd83eb96ffb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69978649, jitterRate=0.042763128876686096}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:40:33,573 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1001): Region open journal for 0c7922ecb6828c8d91bd7dd83eb96ffb: 2024-12-12T20:40:33,573 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1001): Region open journal for 3351f5ee31a68057bf3e18961a3311ae: 2024-12-12T20:40:33,573 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae., pid=166, masterSystemTime=1734036033561 2024-12-12T20:40:33,573 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb., pid=165, masterSystemTime=1734036033561 2024-12-12T20:40:33,574 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae. 2024-12-12T20:40:33,574 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] handler.AssignRegionHandler(164): Opened testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae. 2024-12-12T20:40:33,575 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=163 updating hbase:meta row=3351f5ee31a68057bf3e18961a3311ae, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,42407,1734035781742 2024-12-12T20:40:33,575 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb. 2024-12-12T20:40:33,575 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(164): Opened testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb. 
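At this point both OpenRegionProcedures have finished their post open deploy tasks and the master is about to mark the regions OPEN in hbase:meta; the test harness itself waits for assignment via HBaseTestingUtility a little further down ("Waiting until all regions ... get assigned. Timeout = 60000ms"). A hedged sketch of how a plain client could wait for the same condition using only public Admin calls (the helper, timeout, and polling interval are arbitrary illustrations, not values from the log):

```java
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class WaitForTable {
  /** Polls Admin#isTableAvailable until the table is served or the deadline passes. */
  static void waitUntilAvailable(Admin admin, TableName tn, long timeoutMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!admin.isTableAvailable(tn)) {
      if (System.currentTimeMillis() > deadline) {
        throw new IllegalStateException("Table " + tn + " not available within " + timeoutMs + " ms");
      }
      TimeUnit.MILLISECONDS.sleep(200); // arbitrary polling interval
    }
  }
}
```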
2024-12-12T20:40:33,575 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=0c7922ecb6828c8d91bd7dd83eb96ffb, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:40:33,578 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=163 2024-12-12T20:40:33,578 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=163, state=SUCCESS; OpenRegionProcedure 3351f5ee31a68057bf3e18961a3311ae, server=0a737713f6e3,42407,1734035781742 in 166 msec 2024-12-12T20:40:33,578 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=165, resume processing ppid=164 2024-12-12T20:40:33,578 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, ppid=162, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3351f5ee31a68057bf3e18961a3311ae, ASSIGN in 322 msec 2024-12-12T20:40:33,578 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, ppid=164, state=SUCCESS; OpenRegionProcedure 0c7922ecb6828c8d91bd7dd83eb96ffb, server=0a737713f6e3,46859,1734035782031 in 167 msec 2024-12-12T20:40:33,579 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=162 2024-12-12T20:40:33,580 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=162, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=0c7922ecb6828c8d91bd7dd83eb96ffb, ASSIGN in 321 msec 2024-12-12T20:40:33,580 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T20:40:33,580 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734036033580"}]},"ts":"1734036033580"} 2024-12-12T20:40:33,581 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-12-12T20:40:33,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-12T20:40:33,684 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T20:40:33,684 DEBUG [PEWorker-2 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-12-12T20:40:33,686 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46859 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-12T20:40:33,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:33,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:33,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:33,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:33,901 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:33,901 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:33,901 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:33,902 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:33,902 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:33,902 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:33,902 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:33,902 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:33,902 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, state=SUCCESS; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 1.4160 sec 2024-12-12T20:40:34,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-12T20:40:34,600 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 162 completed 2024-12-12T20:40:34,600 DEBUG [Time-limited test {}] 
hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testEmptyExportFileSystemState get assigned. Timeout = 60000ms 2024-12-12T20:40:34,600 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:40:34,605 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testEmptyExportFileSystemState assigned to meta. Checking AM states. 2024-12-12T20:40:34,605 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:40:34,606 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testEmptyExportFileSystemState assigned. 2024-12-12T20:40:34,609 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-12T20:40:34,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734036034609 (current time:1734036034609). 2024-12-12T20:40:34,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T20:40:34,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-12T20:40:34,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-12T20:40:34,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x330e7200 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b73a379 2024-12-12T20:40:34,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25a4abad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:40:34,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:40:34,627 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43406, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:40:34,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x330e7200 to 127.0.0.1:52655 2024-12-12T20:40:34,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:40:34,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3566e617 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1328f3a9 2024-12-12T20:40:34,642 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13d1dba2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:40:34,643 DEBUG [hconnection-0x26fa538e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:40:34,645 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43412, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:40:34,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3566e617 to 127.0.0.1:52655 2024-12-12T20:40:34,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:40:34,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-12T20:40:34,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-12T20:40:34,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-12T20:40:34,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 167 2024-12-12T20:40:34,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-12T20:40:34,650 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T20:40:34,652 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T20:40:34,654 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T20:40:34,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742189_1365 (size=185) 2024-12-12T20:40:34,660 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742189_1365 (size=185) 2024-12-12T20:40:34,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742189_1365 (size=185) 2024-12-12T20:40:34,662 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T20:40:34,662 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 3351f5ee31a68057bf3e18961a3311ae}, {pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 0c7922ecb6828c8d91bd7dd83eb96ffb}] 2024-12-12T20:40:34,663 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 3351f5ee31a68057bf3e18961a3311ae 2024-12-12T20:40:34,663 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 0c7922ecb6828c8d91bd7dd83eb96ffb 2024-12-12T20:40:34,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-12T20:40:34,814 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:40:34,814 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:40:34,815 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42407 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=168 2024-12-12T20:40:34,815 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46859 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=169 2024-12-12T20:40:34,815 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb. 2024-12-12T20:40:34,815 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae. 
2024-12-12T20:40:34,815 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 3351f5ee31a68057bf3e18961a3311ae: 2024-12-12T20:40:34,815 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.HRegion(2538): Flush status journal for 0c7922ecb6828c8d91bd7dd83eb96ffb: 2024-12-12T20:40:34,815 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-12T20:40:34,815 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-12T20:40:34,815 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-12T20:40:34,815 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-12T20:40:34,815 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:40:34,815 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:40:34,815 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T20:40:34,815 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T20:40:34,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742191_1367 (size=76) 2024-12-12T20:40:34,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742190_1366 (size=76) 2024-12-12T20:40:34,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742191_1367 (size=76) 2024-12-12T20:40:34,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742191_1367 (size=76) 2024-12-12T20:40:34,839 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae. 
2024-12-12T20:40:34,839 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb. 2024-12-12T20:40:34,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742190_1366 (size=76) 2024-12-12T20:40:34,839 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=169 2024-12-12T20:40:34,839 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-12-12T20:40:34,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-12-12T20:40:34,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=169 2024-12-12T20:40:34,839 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 3351f5ee31a68057bf3e18961a3311ae 2024-12-12T20:40:34,839 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 0c7922ecb6828c8d91bd7dd83eb96ffb 2024-12-12T20:40:34,839 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 0c7922ecb6828c8d91bd7dd83eb96ffb 2024-12-12T20:40:34,839 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 3351f5ee31a68057bf3e18961a3311ae 2024-12-12T20:40:34,841 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, ppid=167, state=SUCCESS; SnapshotRegionProcedure 0c7922ecb6828c8d91bd7dd83eb96ffb in 178 msec 2024-12-12T20:40:34,842 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-12-12T20:40:34,842 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T20:40:34,842 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; SnapshotRegionProcedure 3351f5ee31a68057bf3e18961a3311ae in 178 msec 2024-12-12T20:40:34,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742190_1366 (size=76) 2024-12-12T20:40:34,843 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T20:40:34,844 INFO [PEWorker-5 {}] 
procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T20:40:34,844 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-12-12T20:40:34,845 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-12T20:40:34,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742192_1368 (size=567) 2024-12-12T20:40:34,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742192_1368 (size=567) 2024-12-12T20:40:34,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742192_1368 (size=567) 2024-12-12T20:40:34,881 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T20:40:34,885 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T20:40:34,886 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-12T20:40:34,887 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T20:40:34,888 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 167 2024-12-12T20:40:34,889 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 240 msec 2024-12-12T20:40:34,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 
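The block above walks SnapshotProcedure pid=167 for emptySnaptb0-testEmptyExportFileSystemState through SNAPSHOT_PREPARE, SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, SNAPSHOT_CONSOLIDATE_SNAPSHOT, SNAPSHOT_VERIFIER_SNAPSHOT, and SNAPSHOT_COMPLETE_SNAPSHOT. On the client side the whole state machine is driven by a single synchronous Admin call; a minimal sketch, assuming the snapshot and table names from the log (the { ss=... type=FLUSH ttl=0 } descriptor logged by MasterRpcServices should correspond to the default flush-type snapshot taken by the two-argument Admin#snapshot):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
      // Synchronous call: returns once the master-side snapshot procedure completes.
      admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState", tn);
    }
  }
}
```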
2024-12-12T20:40:34,952 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 167 completed 2024-12-12T20:40:34,957 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42407 {}] regionserver.HRegion(8254): writing data to region testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T20:40:34,957 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46859 {}] regionserver.HRegion(8254): writing data to region testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T20:40:34,960 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-12T20:40:34,960 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae. 2024-12-12T20:40:34,960 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:40:34,972 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-12T20:40:34,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734036034972 (current time:1734036034972). 2024-12-12T20:40:34,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T20:40:34,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-12T20:40:34,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-12T20:40:34,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b6c53e7 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@78180e2d 2024-12-12T20:40:34,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b08984a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:40:34,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:40:34,985 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43418, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:40:34,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b6c53e7 to 127.0.0.1:52655 
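The two RpcServer handlers at 20:40:34,957 above warn that the test writes to both regions "with WAL disabled. Data may be lost in the event of a crash." That warning is emitted when a client marks its mutations to skip the write-ahead log; a hedged sketch of such a write (row key and value are purely illustrative, the family "cf" and qualifier "q" match the flush entries later in the log):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalWrite {
  /** Writes one cell to family "cf" while skipping the WAL, which triggers the warning above. */
  static void writeWithoutWal(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    try (Table table = conn.getTable(tn)) {
      Put put = new Put(Bytes.toBytes("row-0"))   // illustrative row key
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      put.setDurability(Durability.SKIP_WAL);     // no WAL entry for this mutation
      table.put(put);
    }
  }
}
```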
2024-12-12T20:40:34,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:40:34,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5d72a750 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@65a3b3b5 2024-12-12T20:40:34,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7af6bd83, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:40:35,001 DEBUG [hconnection-0x1fb1a32d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:40:35,002 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43426, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:40:35,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5d72a750 to 127.0.0.1:52655 2024-12-12T20:40:35,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:40:35,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-12T20:40:35,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-12T20:40:35,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=170, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-12T20:40:35,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 170 2024-12-12T20:40:35,007 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T20:40:35,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-12T20:40:35,008 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T20:40:35,009 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T20:40:35,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742193_1369 (size=180) 2024-12-12T20:40:35,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742193_1369 (size=180) 2024-12-12T20:40:35,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742193_1369 (size=180) 2024-12-12T20:40:35,016 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T20:40:35,016 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 3351f5ee31a68057bf3e18961a3311ae}, {pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 0c7922ecb6828c8d91bd7dd83eb96ffb}] 2024-12-12T20:40:35,017 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 3351f5ee31a68057bf3e18961a3311ae 2024-12-12T20:40:35,017 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 0c7922ecb6828c8d91bd7dd83eb96ffb 2024-12-12T20:40:35,108 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-12T20:40:35,167 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:40:35,167 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:40:35,168 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42407 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=171 2024-12-12T20:40:35,168 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46859 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=172 2024-12-12T20:40:35,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae. 2024-12-12T20:40:35,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb. 2024-12-12T20:40:35,168 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(2837): Flushing 3351f5ee31a68057bf3e18961a3311ae 1/1 column families, dataSize=467 B heapSize=1.23 KB 2024-12-12T20:40:35,168 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing 0c7922ecb6828c8d91bd7dd83eb96ffb 1/1 column families, dataSize=2.80 KB heapSize=6.30 KB 2024-12-12T20:40:35,188 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/0c7922ecb6828c8d91bd7dd83eb96ffb/.tmp/cf/7f03e0d564b4480b9f70d1eebae3b197 is 71, key is 19afda9d769f99c15ce20dd9666c34c3/cf:q/1734036034957/Put/seqid=0 2024-12-12T20:40:35,192 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/3351f5ee31a68057bf3e18961a3311ae/.tmp/cf/68b6fbce9ac64cda8fa519579cc63a58 is 71, key is 02116536c44aad87b06de5f6f1aba0e7/cf:q/1734036034957/Put/seqid=0 2024-12-12T20:40:35,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742194_1370 (size=8052) 2024-12-12T20:40:35,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742194_1370 (size=8052) 2024-12-12T20:40:35,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742194_1370 (size=8052) 2024-12-12T20:40:35,209 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, 
pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.80 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/0c7922ecb6828c8d91bd7dd83eb96ffb/.tmp/cf/7f03e0d564b4480b9f70d1eebae3b197 2024-12-12T20:40:35,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742195_1371 (size=5566) 2024-12-12T20:40:35,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742195_1371 (size=5566) 2024-12-12T20:40:35,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742195_1371 (size=5566) 2024-12-12T20:40:35,214 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=467 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/3351f5ee31a68057bf3e18961a3311ae/.tmp/cf/68b6fbce9ac64cda8fa519579cc63a58 2024-12-12T20:40:35,214 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/0c7922ecb6828c8d91bd7dd83eb96ffb/.tmp/cf/7f03e0d564b4480b9f70d1eebae3b197 as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/0c7922ecb6828c8d91bd7dd83eb96ffb/cf/7f03e0d564b4480b9f70d1eebae3b197 2024-12-12T20:40:35,218 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/3351f5ee31a68057bf3e18961a3311ae/.tmp/cf/68b6fbce9ac64cda8fa519579cc63a58 as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/3351f5ee31a68057bf3e18961a3311ae/cf/68b6fbce9ac64cda8fa519579cc63a58 2024-12-12T20:40:35,218 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/0c7922ecb6828c8d91bd7dd83eb96ffb/cf/7f03e0d564b4480b9f70d1eebae3b197, entries=43, sequenceid=6, filesize=7.9 K 2024-12-12T20:40:35,219 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~2.80 KB/2869, heapSize ~6.28 KB/6432, currentSize=0 B/0 for 0c7922ecb6828c8d91bd7dd83eb96ffb in 51ms, sequenceid=6, compaction requested=false 2024-12-12T20:40:35,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-12-12T20:40:35,219 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for 0c7922ecb6828c8d91bd7dd83eb96ffb: 2024-12-12T20:40:35,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-12T20:40:35,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-12T20:40:35,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:40:35,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/0c7922ecb6828c8d91bd7dd83eb96ffb/cf/7f03e0d564b4480b9f70d1eebae3b197] hfiles 2024-12-12T20:40:35,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/0c7922ecb6828c8d91bd7dd83eb96ffb/cf/7f03e0d564b4480b9f70d1eebae3b197 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-12T20:40:35,223 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/3351f5ee31a68057bf3e18961a3311ae/cf/68b6fbce9ac64cda8fa519579cc63a58, entries=7, sequenceid=6, filesize=5.4 K 2024-12-12T20:40:35,224 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(3040): Finished flush of dataSize ~467 B/467, heapSize ~1.22 KB/1248, currentSize=0 B/0 for 3351f5ee31a68057bf3e18961a3311ae in 55ms, sequenceid=6, compaction requested=false 2024-12-12T20:40:35,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(2538): Flush status journal for 3351f5ee31a68057bf3e18961a3311ae: 2024-12-12T20:40:35,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-12T20:40:35,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-12T20:40:35,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:40:35,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/3351f5ee31a68057bf3e18961a3311ae/cf/68b6fbce9ac64cda8fa519579cc63a58] hfiles 2024-12-12T20:40:35,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/3351f5ee31a68057bf3e18961a3311ae/cf/68b6fbce9ac64cda8fa519579cc63a58 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-12T20:40:35,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742196_1372 (size=115) 2024-12-12T20:40:35,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742196_1372 (size=115) 2024-12-12T20:40:35,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742196_1372 (size=115) 2024-12-12T20:40:35,229 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb. 
2024-12-12T20:40:35,229 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-12-12T20:40:35,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-12-12T20:40:35,229 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 0c7922ecb6828c8d91bd7dd83eb96ffb 2024-12-12T20:40:35,229 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 0c7922ecb6828c8d91bd7dd83eb96ffb 2024-12-12T20:40:35,231 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=170, state=SUCCESS; SnapshotRegionProcedure 0c7922ecb6828c8d91bd7dd83eb96ffb in 214 msec 2024-12-12T20:40:35,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742197_1373 (size=115) 2024-12-12T20:40:35,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742197_1373 (size=115) 2024-12-12T20:40:35,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742197_1373 (size=115) 2024-12-12T20:40:35,232 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae. 
2024-12-12T20:40:35,232 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=171 2024-12-12T20:40:35,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=171 2024-12-12T20:40:35,233 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 3351f5ee31a68057bf3e18961a3311ae 2024-12-12T20:40:35,233 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 3351f5ee31a68057bf3e18961a3311ae 2024-12-12T20:40:35,234 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=171, resume processing ppid=170 2024-12-12T20:40:35,234 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T20:40:35,234 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, ppid=170, state=SUCCESS; SnapshotRegionProcedure 3351f5ee31a68057bf3e18961a3311ae in 217 msec 2024-12-12T20:40:35,235 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T20:40:35,247 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T20:40:35,247 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-12-12T20:40:35,248 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-12-12T20:40:35,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742198_1374 (size=645) 2024-12-12T20:40:35,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742198_1374 (size=645) 2024-12-12T20:40:35,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742198_1374 (size=645) 2024-12-12T20:40:35,265 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 
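[editor's note] The entries above trace the server side of a FLUSH-type snapshot: each SnapshotRegionCallable flushes its region's memstore, commits the resulting hfile, and records region-info plus hfile references in the snapshot manifest, and the master then consolidates the manifest and, in the entries that follow, verifies and completes the snapshot (pid=170). A minimal client-side sketch of what drives this flow, assuming a running HBase 2.x cluster and the standard client API; the class name SnapshotExample is illustrative, while the table and snapshot names are the ones appearing in this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public final class SnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // A FLUSH snapshot flushes each region's memstore first (the
      // HRegion "Flushing ..." / DefaultStoreFlusher entries above),
      // then records region-info and hfile references in the snapshot
      // manifest (the SnapshotManifest entries above).
      admin.snapshot("snaptb0-testEmptyExportFileSystemState",
          TableName.valueOf("testtb-testEmptyExportFileSystemState"),
          SnapshotType.FLUSH);
    }
  }
}

Admin.snapshot is synchronous, which matches the client in this log polling "Checking to see if procedure is done pid=170" until the SnapshotProcedure finishes.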
2024-12-12T20:40:35,269 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T20:40:35,270 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-12-12T20:40:35,271 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T20:40:35,271 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 170 2024-12-12T20:40:35,273 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 267 msec 2024-12-12T20:40:35,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-12T20:40:35,309 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 170 completed 2024-12-12T20:40:35,309 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036035309 2024-12-12T20:40:35,310 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:37615, tgtDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036035309, rawTgtDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036035309, srcFsUri=hdfs://localhost:37615, srcDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:40:35,347 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:37615, inputRoot=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:40:35,347 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1165987477_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036035309, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036035309/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-12T20:40:35,349 INFO 
[Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-12T20:40:35,360 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036035309/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-12T20:40:35,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742200_1376 (size=567) 2024-12-12T20:40:35,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742199_1375 (size=185) 2024-12-12T20:40:35,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742200_1376 (size=567) 2024-12-12T20:40:35,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742199_1375 (size=185) 2024-12-12T20:40:35,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742199_1375 (size=185) 2024-12-12T20:40:35,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742200_1376 (size=567) 2024-12-12T20:40:35,401 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-12T20:40:35,401 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-12T20:40:35,401 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-12T20:40:35,401 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-12T20:40:36,502 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop-1010752974598162218.jar 2024-12-12T20:40:36,502 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-12T20:40:36,503 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-12T20:40:36,573 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop-18036569269752437430.jar 2024-12-12T20:40:36,573 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-12T20:40:36,573 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-12T20:40:36,574 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-12T20:40:36,574 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-12T20:40:36,574 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-12T20:40:36,574 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-12T20:40:36,575 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-12T20:40:36,575 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-12T20:40:36,575 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-12T20:40:36,576 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-12T20:40:36,576 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-12T20:40:36,576 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-12T20:40:36,576 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-12T20:40:36,577 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-12T20:40:36,577 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-12T20:40:36,577 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-12T20:40:36,577 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-12T20:40:36,577 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-12T20:40:36,578 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:40:36,578 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:40:36,578 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:40:36,578 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:40:36,579 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:40:36,579 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:40:36,579 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:40:36,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742201_1377 (size=127628) 2024-12-12T20:40:36,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742201_1377 (size=127628) 2024-12-12T20:40:36,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742201_1377 (size=127628) 2024-12-12T20:40:36,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742202_1378 (size=2172101) 2024-12-12T20:40:36,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742202_1378 (size=2172101) 2024-12-12T20:40:36,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742202_1378 (size=2172101) 2024-12-12T20:40:36,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742203_1379 (size=213228) 2024-12-12T20:40:36,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742203_1379 (size=213228) 2024-12-12T20:40:36,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742203_1379 (size=213228) 2024-12-12T20:40:37,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742204_1380 (size=1877034) 2024-12-12T20:40:37,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742204_1380 (size=1877034) 2024-12-12T20:40:37,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742204_1380 (size=1877034) 2024-12-12T20:40:37,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742205_1381 (size=6350857) 2024-12-12T20:40:37,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742205_1381 (size=6350857) 2024-12-12T20:40:37,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to 
blk_1073742205_1381 (size=6350857) 2024-12-12T20:40:37,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742206_1382 (size=533455) 2024-12-12T20:40:37,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742206_1382 (size=533455) 2024-12-12T20:40:37,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742206_1382 (size=533455) 2024-12-12T20:40:37,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742207_1383 (size=7280644) 2024-12-12T20:40:37,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742207_1383 (size=7280644) 2024-12-12T20:40:37,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742207_1383 (size=7280644) 2024-12-12T20:40:37,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742208_1384 (size=4188619) 2024-12-12T20:40:37,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742208_1384 (size=4188619) 2024-12-12T20:40:37,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742208_1384 (size=4188619) 2024-12-12T20:40:37,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742209_1385 (size=20406) 2024-12-12T20:40:37,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742209_1385 (size=20406) 2024-12-12T20:40:37,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742209_1385 (size=20406) 2024-12-12T20:40:37,616 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T20:40:37,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742210_1386 (size=75495) 2024-12-12T20:40:37,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742210_1386 (size=75495) 2024-12-12T20:40:37,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742210_1386 (size=75495) 2024-12-12T20:40:37,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742211_1387 (size=45609) 2024-12-12T20:40:37,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742211_1387 (size=45609) 2024-12-12T20:40:37,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742211_1387 (size=45609) 2024-12-12T20:40:37,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is 
added to blk_1073742212_1388 (size=110084) 2024-12-12T20:40:37,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742212_1388 (size=110084) 2024-12-12T20:40:37,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742212_1388 (size=110084) 2024-12-12T20:40:37,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742213_1389 (size=1323991) 2024-12-12T20:40:37,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742213_1389 (size=1323991) 2024-12-12T20:40:37,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742213_1389 (size=1323991) 2024-12-12T20:40:37,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742214_1390 (size=23076) 2024-12-12T20:40:37,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742214_1390 (size=23076) 2024-12-12T20:40:37,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742214_1390 (size=23076) 2024-12-12T20:40:37,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742215_1391 (size=126803) 2024-12-12T20:40:37,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742215_1391 (size=126803) 2024-12-12T20:40:37,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742215_1391 (size=126803) 2024-12-12T20:40:37,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742216_1392 (size=322274) 2024-12-12T20:40:37,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742216_1392 (size=322274) 2024-12-12T20:40:37,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742216_1392 (size=322274) 2024-12-12T20:40:37,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742217_1393 (size=1832290) 2024-12-12T20:40:37,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742217_1393 (size=1832290) 2024-12-12T20:40:37,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742217_1393 (size=1832290) 2024-12-12T20:40:37,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742218_1394 (size=30081) 2024-12-12T20:40:37,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742218_1394 (size=30081) 2024-12-12T20:40:37,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36349 is added to blk_1073742218_1394 (size=30081) 2024-12-12T20:40:37,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742219_1395 (size=53616) 2024-12-12T20:40:37,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742219_1395 (size=53616) 2024-12-12T20:40:37,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742219_1395 (size=53616) 2024-12-12T20:40:37,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742220_1396 (size=29229) 2024-12-12T20:40:37,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742220_1396 (size=29229) 2024-12-12T20:40:37,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742220_1396 (size=29229) 2024-12-12T20:40:37,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742221_1397 (size=169089) 2024-12-12T20:40:37,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742221_1397 (size=169089) 2024-12-12T20:40:37,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742221_1397 (size=169089) 2024-12-12T20:40:38,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742222_1398 (size=451756) 2024-12-12T20:40:38,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742222_1398 (size=451756) 2024-12-12T20:40:38,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742222_1398 (size=451756) 2024-12-12T20:40:38,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742223_1399 (size=5175431) 2024-12-12T20:40:38,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742223_1399 (size=5175431) 2024-12-12T20:40:38,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742223_1399 (size=5175431) 2024-12-12T20:40:38,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742224_1400 (size=136454) 2024-12-12T20:40:38,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742224_1400 (size=136454) 2024-12-12T20:40:38,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742224_1400 (size=136454) 2024-12-12T20:40:38,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742225_1401 (size=907861) 2024-12-12T20:40:38,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:32781 is added to blk_1073742225_1401 (size=907861) 2024-12-12T20:40:38,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742225_1401 (size=907861) 2024-12-12T20:40:38,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742226_1402 (size=3317408) 2024-12-12T20:40:38,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742226_1402 (size=3317408) 2024-12-12T20:40:38,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742226_1402 (size=3317408) 2024-12-12T20:40:38,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742227_1403 (size=503880) 2024-12-12T20:40:38,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742227_1403 (size=503880) 2024-12-12T20:40:38,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742227_1403 (size=503880) 2024-12-12T20:40:38,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742228_1404 (size=4695811) 2024-12-12T20:40:38,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742228_1404 (size=4695811) 2024-12-12T20:40:38,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742228_1404 (size=4695811) 2024-12-12T20:40:38,427 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
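[editor's note] From here the log switches to ExportSnapshot: TableMapReduceUtil resolves a jar for every class the MapReduce job needs (hbase-common, hbase-client, the shaded thirdparty artifacts, hadoop-common, and so on) and stages them on HDFS, which is what produces the long run of addStoredBlock entries above; the JobResourceUploader warning appears because the test submits the job without an explicit job jar, and the job itself loads the hfile list and runs in the entries that follow. A minimal sketch of invoking the same tool programmatically, under the assumption that ExportSnapshot can be driven through Hadoop's ToolRunner as in HBase 2.x; the destination URI below is an illustrative placeholder, not a path from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public final class ExportSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copies the snapshot manifest plus the referenced hfiles to the
    // target filesystem; comparable to the documented CLI form
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot <name> -copy-to <uri>
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
        "-copy-to", "hdfs://backup-namenode:8020/hbase-backups"  // placeholder target
    });
    System.exit(rc);
  }
}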
2024-12-12T20:40:38,432 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-12-12T20:40:38,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742229_1405 (size=7) 2024-12-12T20:40:38,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742229_1405 (size=7) 2024-12-12T20:40:38,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742229_1405 (size=7) 2024-12-12T20:40:38,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742230_1406 (size=10) 2024-12-12T20:40:38,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742230_1406 (size=10) 2024-12-12T20:40:38,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742230_1406 (size=10) 2024-12-12T20:40:38,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742231_1407 (size=304788) 2024-12-12T20:40:38,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742231_1407 (size=304788) 2024-12-12T20:40:38,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742231_1407 (size=304788) 2024-12-12T20:40:38,492 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T20:40:38,492 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-12T20:40:38,802 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0007_000001 (auth:SIMPLE) from 127.0.0.1:50226 2024-12-12T20:40:41,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-12T20:40:41,126 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-12T20:40:41,127 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-12T20:40:45,881 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0007_000001 (auth:SIMPLE) from 127.0.0.1:46690 2024-12-12T20:40:46,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742232_1408 (size=350438) 2024-12-12T20:40:46,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742232_1408 (size=350438) 2024-12-12T20:40:46,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742232_1408 (size=350438) 2024-12-12T20:40:46,629 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T20:40:46,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742233_1409 (size=8568) 2024-12-12T20:40:46,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742233_1409 (size=8568) 2024-12-12T20:40:46,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742233_1409 (size=8568) 2024-12-12T20:40:46,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742234_1410 (size=460) 2024-12-12T20:40:46,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742234_1410 (size=460) 2024-12-12T20:40:46,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742234_1410 (size=460) 2024-12-12T20:40:47,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742235_1411 (size=8568) 2024-12-12T20:40:47,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742235_1411 (size=8568) 2024-12-12T20:40:47,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742235_1411 (size=8568) 2024-12-12T20:40:47,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:41061 is added to blk_1073742236_1412 (size=350438) 2024-12-12T20:40:47,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742236_1412 (size=350438) 2024-12-12T20:40:47,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742236_1412 (size=350438) 2024-12-12T20:40:48,659 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-12T20:40:48,668 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-12T20:40:48,696 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-12-12T20:40:48,696 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-12T20:40:48,697 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-12T20:40:48,698 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1165987477_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-12T20:40:48,698 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-12T20:40:48,699 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-12T20:40:48,699 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1165987477_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036035309/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036035309/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-12T20:40:48,699 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036035309/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-12T20:40:48,699 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036035309/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-12T20:40:48,713 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testEmptyExportFileSystemState 2024-12-12T20:40:48,713 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-12-12T20:40:48,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] 
procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-12T20:40:48,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-12T20:40:48,718 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734036048718"}]},"ts":"1734036048718"} 2024-12-12T20:40:48,720 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-12-12T20:40:48,757 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-12-12T20:40:48,758 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-12-12T20:40:48,760 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3351f5ee31a68057bf3e18961a3311ae, UNASSIGN}, {pid=176, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=0c7922ecb6828c8d91bd7dd83eb96ffb, UNASSIGN}] 2024-12-12T20:40:48,762 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=176, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=0c7922ecb6828c8d91bd7dd83eb96ffb, UNASSIGN 2024-12-12T20:40:48,762 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=175, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3351f5ee31a68057bf3e18961a3311ae, UNASSIGN 2024-12-12T20:40:48,763 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=175 updating hbase:meta row=3351f5ee31a68057bf3e18961a3311ae, regionState=CLOSING, regionLocation=0a737713f6e3,42407,1734035781742 2024-12-12T20:40:48,763 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=176 updating hbase:meta row=0c7922ecb6828c8d91bd7dd83eb96ffb, regionState=CLOSING, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:40:48,765 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T20:40:48,765 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=177, ppid=175, state=RUNNABLE; CloseRegionProcedure 3351f5ee31a68057bf3e18961a3311ae, server=0a737713f6e3,42407,1734035781742}] 2024-12-12T20:40:48,765 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T20:40:48,766 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=176, state=RUNNABLE; CloseRegionProcedure 0c7922ecb6828c8d91bd7dd83eb96ffb, server=0a737713f6e3,46859,1734035782031}] 2024-12-12T20:40:48,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-12T20:40:48,918 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:40:48,918 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(124): Close 3351f5ee31a68057bf3e18961a3311ae 2024-12-12T20:40:48,918 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T20:40:48,918 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1681): Closing 3351f5ee31a68057bf3e18961a3311ae, disabling compactions & flushes 2024-12-12T20:40:48,918 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae. 2024-12-12T20:40:48,919 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae. 2024-12-12T20:40:48,919 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae. after waiting 0 ms 2024-12-12T20:40:48,919 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae. 2024-12-12T20:40:48,920 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:40:48,920 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(124): Close 0c7922ecb6828c8d91bd7dd83eb96ffb 2024-12-12T20:40:48,920 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T20:40:48,920 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1681): Closing 0c7922ecb6828c8d91bd7dd83eb96ffb, disabling compactions & flushes 2024-12-12T20:40:48,920 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb. 2024-12-12T20:40:48,920 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb. 2024-12-12T20:40:48,920 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb. 
after waiting 0 ms 2024-12-12T20:40:48,920 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb. 2024-12-12T20:40:48,932 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/3351f5ee31a68057bf3e18961a3311ae/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T20:40:48,932 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/0c7922ecb6828c8d91bd7dd83eb96ffb/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T20:40:48,933 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:40:48,933 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae. 2024-12-12T20:40:48,933 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:40:48,933 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1635): Region close journal for 3351f5ee31a68057bf3e18961a3311ae: 2024-12-12T20:40:48,933 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb. 
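
The entries above trace the master's DisableTableProcedure (pid=173) fanning out into a CloseTableRegionsProcedure and per-region CloseRegionProcedures, while each regionserver disables compactions and flushes, takes the close lock, writes a recovered.edits seqid marker, and records the close journal. A minimal client-side sketch of the call that drives this flow, assuming the standard HBase 2.x Admin API; the connection setup and class name below are illustrative, not taken from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
          // Blocks until the master reports the DisableTableProcedure as done;
          // the RPC handler keeps polling it, hence the repeated
          // "Checking to see if procedure is done pid=173" lines.
          admin.disableTable(tn);
          System.out.println("disabled: " + admin.isTableDisabled(tn));
        }
      }
    }
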
2024-12-12T20:40:48,933 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1635): Region close journal for 0c7922ecb6828c8d91bd7dd83eb96ffb: 2024-12-12T20:40:48,934 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(170): Closed 0c7922ecb6828c8d91bd7dd83eb96ffb 2024-12-12T20:40:48,934 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=176 updating hbase:meta row=0c7922ecb6828c8d91bd7dd83eb96ffb, regionState=CLOSED 2024-12-12T20:40:48,935 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(170): Closed 3351f5ee31a68057bf3e18961a3311ae 2024-12-12T20:40:48,935 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=175 updating hbase:meta row=3351f5ee31a68057bf3e18961a3311ae, regionState=CLOSED 2024-12-12T20:40:48,938 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=176 2024-12-12T20:40:48,939 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=177, resume processing ppid=175 2024-12-12T20:40:48,939 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=176, state=SUCCESS; CloseRegionProcedure 0c7922ecb6828c8d91bd7dd83eb96ffb, server=0a737713f6e3,46859,1734035782031 in 172 msec 2024-12-12T20:40:48,939 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, ppid=175, state=SUCCESS; CloseRegionProcedure 3351f5ee31a68057bf3e18961a3311ae, server=0a737713f6e3,42407,1734035781742 in 172 msec 2024-12-12T20:40:48,940 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=174, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=0c7922ecb6828c8d91bd7dd83eb96ffb, UNASSIGN in 178 msec 2024-12-12T20:40:48,941 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=175, resume processing ppid=174 2024-12-12T20:40:48,941 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, ppid=174, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3351f5ee31a68057bf3e18961a3311ae, UNASSIGN in 179 msec 2024-12-12T20:40:48,944 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-12-12T20:40:48,944 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 183 msec 2024-12-12T20:40:48,945 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734036048945"}]},"ts":"1734036048945"} 2024-12-12T20:40:48,946 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-12-12T20:40:48,957 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-12-12T20:40:48,959 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 245 msec 2024-12-12T20:40:49,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to 
see if procedure is done pid=173 2024-12-12T20:40:49,019 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 173 completed 2024-12-12T20:40:49,020 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-12-12T20:40:49,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-12T20:40:49,027 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-12T20:40:49,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-12-12T20:40:49,028 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=179, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-12T20:40:49,029 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46859 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-12-12T20:40:49,035 DEBUG [HFileArchiver-35 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/3351f5ee31a68057bf3e18961a3311ae 2024-12-12T20:40:49,035 DEBUG [HFileArchiver-36 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/0c7922ecb6828c8d91bd7dd83eb96ffb 2024-12-12T20:40:49,037 DEBUG [HFileArchiver-36 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/0c7922ecb6828c8d91bd7dd83eb96ffb/cf, FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/0c7922ecb6828c8d91bd7dd83eb96ffb/recovered.edits] 2024-12-12T20:40:49,038 DEBUG [HFileArchiver-35 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/3351f5ee31a68057bf3e18961a3311ae/cf, FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/3351f5ee31a68057bf3e18961a3311ae/recovered.edits] 2024-12-12T20:40:49,044 DEBUG [HFileArchiver-34 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/0c7922ecb6828c8d91bd7dd83eb96ffb/cf/7f03e0d564b4480b9f70d1eebae3b197 to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testEmptyExportFileSystemState/0c7922ecb6828c8d91bd7dd83eb96ffb/cf/7f03e0d564b4480b9f70d1eebae3b197 
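
The HFileArchiver entries above show that dropping a table moves its store files rather than deleting them outright: a file under data/default/&lt;table&gt;/&lt;region&gt;/cf/ reappears under archive/data/default/&lt;table&gt;/&lt;region&gt;/cf/ beneath the same root directory. A purely illustrative helper (not an HBase API) that reproduces the observed mapping with Hadoop's Path class; the table, region and file names in main are placeholders:

    import org.apache.hadoop.fs.Path;

    // Illustrative only: mirrors the data/... -> archive/data/... layout visible
    // in the HFileArchiver lines; HBase does this internally during table delete.
    public final class ArchivePathSketch {
      static Path toArchive(Path rootDir, Path storeFile) {
        String root = rootDir.toUri().getPath();
        String file = storeFile.toUri().getPath();
        // Keep the path relative to the root dir and re-root it under "archive".
        String relative = file.substring(root.length() + 1);
        return new Path(new Path(rootDir, "archive"), relative);
      }

      public static void main(String[] args) {
        Path root = new Path("/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427");
        Path hfile = new Path(root, "data/default/sometable/someregion/cf/somehfile");
        System.out.println(toArchive(root, hfile));
      }
    }
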
2024-12-12T20:40:49,044 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/3351f5ee31a68057bf3e18961a3311ae/cf/68b6fbce9ac64cda8fa519579cc63a58 to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testEmptyExportFileSystemState/3351f5ee31a68057bf3e18961a3311ae/cf/68b6fbce9ac64cda8fa519579cc63a58 2024-12-12T20:40:49,048 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/0c7922ecb6828c8d91bd7dd83eb96ffb/recovered.edits/9.seqid to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testEmptyExportFileSystemState/0c7922ecb6828c8d91bd7dd83eb96ffb/recovered.edits/9.seqid 2024-12-12T20:40:49,048 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/3351f5ee31a68057bf3e18961a3311ae/recovered.edits/9.seqid to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testEmptyExportFileSystemState/3351f5ee31a68057bf3e18961a3311ae/recovered.edits/9.seqid 2024-12-12T20:40:49,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-12T20:40:49,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-12T20:40:49,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-12T20:40:49,049 DEBUG [HFileArchiver-36 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/0c7922ecb6828c8d91bd7dd83eb96ffb 2024-12-12T20:40:49,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-12T20:40:49,049 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-12T20:40:49,050 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-12T20:40:49,052 DEBUG [HFileArchiver-35 {}] backup.HFileArchiver(634): Deleted 
hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testEmptyExportFileSystemState/3351f5ee31a68057bf3e18961a3311ae 2024-12-12T20:40:49,052 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-12-12T20:40:49,054 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=179, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-12T20:40:49,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:49,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-12T20:40:49,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:49,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:49,057 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data null 2024-12-12T20:40:49,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-12T20:40:49,057 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data null 2024-12-12T20:40:49,057 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-12T20:40:49,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:49,057 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-12T20:40:49,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-12T20:40:49,058 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:49,058 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 
\x04 2024-12-12T20:40:49,058 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:49,059 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:49,066 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-12-12T20:40:49,068 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-12-12T20:40:49,073 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=179, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-12T20:40:49,074 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testEmptyExportFileSystemState' from region states. 2024-12-12T20:40:49,074 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734036049074"}]},"ts":"9223372036854775807"} 2024-12-12T20:40:49,074 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734036049074"}]},"ts":"9223372036854775807"} 2024-12-12T20:40:49,077 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-12T20:40:49,077 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 3351f5ee31a68057bf3e18961a3311ae, NAME => 'testtb-testEmptyExportFileSystemState,,1734036032484.3351f5ee31a68057bf3e18961a3311ae.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 0c7922ecb6828c8d91bd7dd83eb96ffb, NAME => 'testtb-testEmptyExportFileSystemState,1,1734036032484.0c7922ecb6828c8d91bd7dd83eb96ffb.', STARTKEY => '1', ENDKEY => ''}] 2024-12-12T20:40:49,078 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 
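
At this point the DeleteTableProcedure has archived the region directories, deleted the region rows from hbase:meta, and removed the table descriptor; the entries that follow finish the meta cleanup and then drop the two snapshots taken earlier in the test. A hedged sketch of the equivalent client-side teardown, assuming an existing Admin handle named admin and the table and snapshot names seen in this run:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public final class TeardownSketch {
      static void dropTableAndSnapshots(Admin admin) throws Exception {
        TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
        if (admin.tableExists(tn)) {
          if (!admin.isTableDisabled(tn)) {
            admin.disableTable(tn);
          }
          // Drives the DeleteTableProcedure (pid=179 above): archive the region
          // directories on HDFS, then clean the table out of hbase:meta.
          admin.deleteTable(tn);
        }
        // Matches the SnapshotManager "Deleting snapshot" entries below.
        admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
        admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
      }
    }
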
2024-12-12T20:40:49,078 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734036049078"}]},"ts":"9223372036854775807"} 2024-12-12T20:40:49,081 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-12-12T20:40:49,091 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=179, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-12T20:40:49,094 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 71 msec 2024-12-12T20:40:49,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-12T20:40:49,159 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 179 completed 2024-12-12T20:40:49,167 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" 2024-12-12T20:40:49,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-12-12T20:40:49,170 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" 2024-12-12T20:40:49,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-12-12T20:40:49,195 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=809 (was 800) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-44 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1715629052_1 at /127.0.0.1:52364 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-35 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-5455 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-42 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46101 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874193583) connection to localhost/127.0.0.1:36089 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (874193583) connection to localhost/127.0.0.1:40403 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-43 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1165987477_22 at /127.0.0.1:57674 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-41 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36089 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1165987477_22 at /127.0.0.1:52386 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-36 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1165987477_22 at /127.0.0.1:52782 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874193583) connection to localhost/127.0.0.1:46101 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 70847) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=808 (was 793) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1348 (was 1522), ProcessCount=15 (was 11) - ProcessCount LEAK? 
-, AvailableMemoryMB=1935 (was 3544) 2024-12-12T20:40:49,195 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=809 is superior to 500 2024-12-12T20:40:49,212 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=809, OpenFileDescriptor=808, MaxFileDescriptor=1048576, SystemLoadAverage=1348, ProcessCount=15, AvailableMemoryMB=1933 2024-12-12T20:40:49,212 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=809 is superior to 500 2024-12-12T20:40:49,214 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T20:40:49,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithChecksum 2024-12-12T20:40:49,216 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T20:40:49,216 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:40:49,216 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 180 2024-12-12T20:40:49,217 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T20:40:49,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-12T20:40:49,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742237_1413 (size=404) 2024-12-12T20:40:49,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742237_1413 (size=404) 2024-12-12T20:40:49,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742237_1413 (size=404) 2024-12-12T20:40:49,228 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 08f57703150ab9ef20f2776475b12ad2, NAME => 'testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:40:49,228 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 376feae6d5935ea89b8dc97bd6e805f8, NAME => 'testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:40:49,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742239_1415 (size=65) 2024-12-12T20:40:49,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742239_1415 (size=65) 2024-12-12T20:40:49,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742238_1414 (size=65) 2024-12-12T20:40:49,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742239_1415 (size=65) 2024-12-12T20:40:49,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742238_1414 (size=65) 2024-12-12T20:40:49,258 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:40:49,258 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1681): Closing 376feae6d5935ea89b8dc97bd6e805f8, disabling compactions & flushes 2024-12-12T20:40:49,258 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8. 2024-12-12T20:40:49,258 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8. 2024-12-12T20:40:49,258 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8. after waiting 0 ms 2024-12-12T20:40:49,258 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8. 
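
The create request above spells out the table descriptor (REGION_REPLICATION => '1', a single 'cf' family with VERSIONS => '1', a ROW bloom filter and 64 KB blocks), and the RegionOpenAndInit workers initialize two regions split at row key '1'. A rough client-side equivalent of that request using the HBase 2.x descriptor builders; attributes not set explicitly below (TTL, compression, block cache and so on) are simply left at their builder defaults:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateTableSketch {
      static void createChecksumTable(Admin admin) throws Exception {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportWithChecksum"))
            .setRegionReplication(1)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)
                .setBloomFilterType(BloomType.ROW)
                .setBlocksize(64 * 1024)
                .build())
            .build();
        // One split key yields the two regions seen above: ('' .. '1') and ('1' .. '').
        admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
      }
    }
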
2024-12-12T20:40:49,258 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8. 2024-12-12T20:40:49,258 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1635): Region close journal for 376feae6d5935ea89b8dc97bd6e805f8: 2024-12-12T20:40:49,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742238_1414 (size=65) 2024-12-12T20:40:49,260 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:40:49,260 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1681): Closing 08f57703150ab9ef20f2776475b12ad2, disabling compactions & flushes 2024-12-12T20:40:49,260 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2. 2024-12-12T20:40:49,260 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2. 2024-12-12T20:40:49,260 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2. after waiting 0 ms 2024-12-12T20:40:49,260 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2. 2024-12-12T20:40:49,260 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2. 2024-12-12T20:40:49,260 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1635): Region close journal for 08f57703150ab9ef20f2776475b12ad2: 2024-12-12T20:40:49,261 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T20:40:49,262 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734036049262"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734036049262"}]},"ts":"1734036049262"} 2024-12-12T20:40:49,262 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734036049262"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734036049262"}]},"ts":"1734036049262"} 2024-12-12T20:40:49,264 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 
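
"Added 2 regions to meta" corresponds to the two Put mutations shown just above, each writing the info:regioninfo and info:state columns for one region row of the new table. A small sketch of reading those rows back through the public client API, assuming an open Connection named conn; it prints only raw row keys and state bytes to avoid depending on internal parsers, and the start/stop-row trick relies on ',' sorting immediately before '-':

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MetaScanSketch {
      static void printMetaRows(Connection conn) throws Exception {
        Scan scan = new Scan()
            .withStartRow(Bytes.toBytes("testtb-testExportWithChecksum,"))
            .withStopRow(Bytes.toBytes("testtb-testExportWithChecksum-"))
            .addFamily(Bytes.toBytes("info"));
        try (Table meta = conn.getTable(TableName.META_TABLE_NAME);
             ResultScanner scanner = meta.getScanner(scan)) {
          for (Result r : scanner) {
            byte[] state = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"));
            System.out.println(Bytes.toStringBinary(r.getRow())
                + " state=" + (state == null ? "?" : Bytes.toStringBinary(state)));
          }
        }
      }
    }
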
2024-12-12T20:40:49,265 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T20:40:49,265 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734036049265"}]},"ts":"1734036049265"} 2024-12-12T20:40:49,266 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-12-12T20:40:49,282 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {0a737713f6e3=0} racks are {/default-rack=0} 2024-12-12T20:40:49,284 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-12T20:40:49,284 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-12T20:40:49,284 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-12T20:40:49,284 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-12T20:40:49,284 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-12T20:40:49,284 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-12T20:40:49,284 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T20:40:49,284 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=08f57703150ab9ef20f2776475b12ad2, ASSIGN}, {pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=376feae6d5935ea89b8dc97bd6e805f8, ASSIGN}] 2024-12-12T20:40:49,285 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=376feae6d5935ea89b8dc97bd6e805f8, ASSIGN 2024-12-12T20:40:49,285 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=08f57703150ab9ef20f2776475b12ad2, ASSIGN 2024-12-12T20:40:49,286 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=376feae6d5935ea89b8dc97bd6e805f8, ASSIGN; state=OFFLINE, location=0a737713f6e3,42407,1734035781742; forceNewPlan=false, retain=false 2024-12-12T20:40:49,286 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=08f57703150ab9ef20f2776475b12ad2, ASSIGN; state=OFFLINE, location=0a737713f6e3,46859,1734035782031; forceNewPlan=false, retain=false 2024-12-12T20:40:49,300 DEBUG [FsDatasetAsyncDiskServiceFixer {}] 
hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T20:40:49,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-12T20:40:49,436 INFO [0a737713f6e3:43067 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-12T20:40:49,437 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=182 updating hbase:meta row=376feae6d5935ea89b8dc97bd6e805f8, regionState=OPENING, regionLocation=0a737713f6e3,42407,1734035781742 2024-12-12T20:40:49,437 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=08f57703150ab9ef20f2776475b12ad2, regionState=OPENING, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:40:49,439 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=183, ppid=182, state=RUNNABLE; OpenRegionProcedure 376feae6d5935ea89b8dc97bd6e805f8, server=0a737713f6e3,42407,1734035781742}] 2024-12-12T20:40:49,440 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=184, ppid=181, state=RUNNABLE; OpenRegionProcedure 08f57703150ab9ef20f2776475b12ad2, server=0a737713f6e3,46859,1734035782031}] 2024-12-12T20:40:49,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-12T20:40:49,591 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:40:49,592 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:40:49,593 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(135): Open testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8. 2024-12-12T20:40:49,593 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(135): Open testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2. 2024-12-12T20:40:49,593 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7285): Opening region: {ENCODED => 376feae6d5935ea89b8dc97bd6e805f8, NAME => 'testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8.', STARTKEY => '1', ENDKEY => ''} 2024-12-12T20:40:49,593 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7285): Opening region: {ENCODED => 08f57703150ab9ef20f2776475b12ad2, NAME => 'testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2.', STARTKEY => '', ENDKEY => '1'} 2024-12-12T20:40:49,594 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8. service=AccessControlService 2024-12-12T20:40:49,594 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
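
The assignment entries above show the balancer picking servers for the two new regions, the OpenRegionProcedures being dispatched to 0a737713f6e3,42407 and 0a737713f6e3,46859, and each regionserver registering the AccessControlService coprocessor as it opens its region. Once assignment settles, a client can observe the resulting placement through the public RegionLocator interface; a brief sketch, assuming an open Connection named conn:

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public final class RegionLocationSketch {
      static void printLocations(Connection conn) throws Exception {
        TableName tn = TableName.valueOf("testtb-testExportWithChecksum");
        try (RegionLocator locator = conn.getRegionLocator(tn)) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            // Encoded region name and its hosting server, matching the
            // regionState/regionLocation updates written to hbase:meta.
            System.out.println(loc.getRegion().getEncodedName()
                + " -> " + loc.getServerName());
          }
        }
      }
    }
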
2024-12-12T20:40:49,594 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 376feae6d5935ea89b8dc97bd6e805f8 2024-12-12T20:40:49,594 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:40:49,594 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7327): checking encryption for 376feae6d5935ea89b8dc97bd6e805f8 2024-12-12T20:40:49,594 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7330): checking classloading for 376feae6d5935ea89b8dc97bd6e805f8 2024-12-12T20:40:49,595 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2. service=AccessControlService 2024-12-12T20:40:49,595 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T20:40:49,595 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 08f57703150ab9ef20f2776475b12ad2 2024-12-12T20:40:49,595 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:40:49,595 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7327): checking encryption for 08f57703150ab9ef20f2776475b12ad2 2024-12-12T20:40:49,595 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7330): checking classloading for 08f57703150ab9ef20f2776475b12ad2 2024-12-12T20:40:49,596 INFO [StoreOpener-08f57703150ab9ef20f2776475b12ad2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 08f57703150ab9ef20f2776475b12ad2 2024-12-12T20:40:49,598 INFO [StoreOpener-08f57703150ab9ef20f2776475b12ad2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 08f57703150ab9ef20f2776475b12ad2 columnFamilyName cf 2024-12-12T20:40:49,598 DEBUG [StoreOpener-08f57703150ab9ef20f2776475b12ad2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:40:49,598 INFO [StoreOpener-08f57703150ab9ef20f2776475b12ad2-1 {}] regionserver.HStore(327): Store=08f57703150ab9ef20f2776475b12ad2/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:40:49,599 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/08f57703150ab9ef20f2776475b12ad2 2024-12-12T20:40:49,599 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/08f57703150ab9ef20f2776475b12ad2 2024-12-12T20:40:49,600 INFO [StoreOpener-376feae6d5935ea89b8dc97bd6e805f8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 376feae6d5935ea89b8dc97bd6e805f8 2024-12-12T20:40:49,601 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1085): writing seq id for 08f57703150ab9ef20f2776475b12ad2 2024-12-12T20:40:49,601 INFO [StoreOpener-376feae6d5935ea89b8dc97bd6e805f8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 376feae6d5935ea89b8dc97bd6e805f8 columnFamilyName cf 2024-12-12T20:40:49,601 DEBUG [StoreOpener-376feae6d5935ea89b8dc97bd6e805f8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:40:49,601 INFO [StoreOpener-376feae6d5935ea89b8dc97bd6e805f8-1 {}] regionserver.HStore(327): Store=376feae6d5935ea89b8dc97bd6e805f8/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:40:49,602 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5301): Found 0 recovered edits 
file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/376feae6d5935ea89b8dc97bd6e805f8 2024-12-12T20:40:49,602 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/08f57703150ab9ef20f2776475b12ad2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:40:49,603 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1102): Opened 08f57703150ab9ef20f2776475b12ad2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66894515, jitterRate=-0.0031940490007400513}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:40:49,603 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/376feae6d5935ea89b8dc97bd6e805f8 2024-12-12T20:40:49,603 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1001): Region open journal for 08f57703150ab9ef20f2776475b12ad2: 2024-12-12T20:40:49,604 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1085): writing seq id for 376feae6d5935ea89b8dc97bd6e805f8 2024-12-12T20:40:49,607 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/376feae6d5935ea89b8dc97bd6e805f8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:40:49,607 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1102): Opened 376feae6d5935ea89b8dc97bd6e805f8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64895110, jitterRate=-0.032987505197525024}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:40:49,608 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1001): Region open journal for 376feae6d5935ea89b8dc97bd6e805f8: 2024-12-12T20:40:49,608 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2., pid=184, masterSystemTime=1734036049591 2024-12-12T20:40:49,608 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8., pid=183, masterSystemTime=1734036049590 2024-12-12T20:40:49,609 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2628): Finished post open deploy task for 
testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2. 2024-12-12T20:40:49,609 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(164): Opened testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2. 2024-12-12T20:40:49,610 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=08f57703150ab9ef20f2776475b12ad2, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:40:49,610 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8. 2024-12-12T20:40:49,610 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(164): Opened testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8. 2024-12-12T20:40:49,610 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=182 updating hbase:meta row=376feae6d5935ea89b8dc97bd6e805f8, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,42407,1734035781742 2024-12-12T20:40:49,613 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=184, resume processing ppid=181 2024-12-12T20:40:49,613 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=183, resume processing ppid=182 2024-12-12T20:40:49,615 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, ppid=182, state=SUCCESS; OpenRegionProcedure 376feae6d5935ea89b8dc97bd6e805f8, server=0a737713f6e3,42407,1734035781742 in 173 msec 2024-12-12T20:40:49,615 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=184, ppid=181, state=SUCCESS; OpenRegionProcedure 08f57703150ab9ef20f2776475b12ad2, server=0a737713f6e3,46859,1734035782031 in 171 msec 2024-12-12T20:40:49,616 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=08f57703150ab9ef20f2776475b12ad2, ASSIGN in 329 msec 2024-12-12T20:40:49,617 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=182, resume processing ppid=180 2024-12-12T20:40:49,617 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=376feae6d5935ea89b8dc97bd6e805f8, ASSIGN in 331 msec 2024-12-12T20:40:49,617 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T20:40:49,617 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734036049617"}]},"ts":"1734036049617"} 2024-12-12T20:40:49,619 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-12-12T20:40:49,661 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 
2024-12-12T20:40:49,661 DEBUG [PEWorker-2 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-12-12T20:40:49,663 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46859 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-12T20:40:49,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:49,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:49,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:49,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:40:49,682 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:49,682 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:49,682 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:49,682 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:49,682 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:49,682 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:49,683 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:49,683 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data 
PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-12T20:40:49,683 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithChecksum in 468 msec 2024-12-12T20:40:49,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-12T20:40:49,820 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum, procId: 180 completed 2024-12-12T20:40:49,820 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithChecksum get assigned. Timeout = 60000ms 2024-12-12T20:40:49,820 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:40:49,828 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithChecksum assigned to meta. Checking AM states. 2024-12-12T20:40:49,828 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:40:49,828 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithChecksum assigned. 2024-12-12T20:40:49,832 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-12T20:40:49,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734036049832 (current time:1734036049832). 
2024-12-12T20:40:49,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T20:40:49,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-12T20:40:49,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-12T20:40:49,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x43eb491a to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@73fa8918 2024-12-12T20:40:49,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45121645, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:40:49,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:40:49,856 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50908, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:40:49,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x43eb491a to 127.0.0.1:52655 2024-12-12T20:40:49,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:40:49,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x23f67838 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4e39717 2024-12-12T20:40:49,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@511609f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:40:49,876 DEBUG [hconnection-0x9adb565-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:40:49,877 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50910, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:40:49,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x23f67838 to 127.0.0.1:52655 2024-12-12T20:40:49,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:40:49,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 
2024-12-12T20:40:49,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-12T20:40:49,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=185, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-12T20:40:49,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 185 2024-12-12T20:40:49,889 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T20:40:49,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-12T20:40:49,890 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T20:40:49,893 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T20:40:49,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742240_1416 (size=161) 2024-12-12T20:40:49,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742240_1416 (size=161) 2024-12-12T20:40:49,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742240_1416 (size=161) 2024-12-12T20:40:49,901 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T20:40:49,902 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 08f57703150ab9ef20f2776475b12ad2}, {pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 376feae6d5935ea89b8dc97bd6e805f8}] 2024-12-12T20:40:49,902 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 08f57703150ab9ef20f2776475b12ad2 2024-12-12T20:40:49,903 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 
376feae6d5935ea89b8dc97bd6e805f8 2024-12-12T20:40:49,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-12T20:40:50,005 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-12-12T20:40:50,054 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:40:50,054 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:40:50,054 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42407 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=187 2024-12-12T20:40:50,054 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46859 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=186 2024-12-12T20:40:50,054 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8. 2024-12-12T20:40:50,055 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2538): Flush status journal for 376feae6d5935ea89b8dc97bd6e805f8: 2024-12-12T20:40:50,055 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8. for emptySnaptb0-testExportWithChecksum completed. 2024-12-12T20:40:50,055 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-12T20:40:50,055 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:40:50,055 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T20:40:50,055 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2. 2024-12-12T20:40:50,055 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2538): Flush status journal for 08f57703150ab9ef20f2776475b12ad2: 2024-12-12T20:40:50,055 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2. for emptySnaptb0-testExportWithChecksum completed. 
2024-12-12T20:40:50,055 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-12T20:40:50,056 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:40:50,056 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T20:40:50,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742242_1418 (size=68) 2024-12-12T20:40:50,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742242_1418 (size=68) 2024-12-12T20:40:50,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742242_1418 (size=68) 2024-12-12T20:40:50,106 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2. 2024-12-12T20:40:50,107 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-12-12T20:40:50,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=186 2024-12-12T20:40:50,107 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 08f57703150ab9ef20f2776475b12ad2 2024-12-12T20:40:50,108 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 08f57703150ab9ef20f2776475b12ad2 2024-12-12T20:40:50,116 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=186, ppid=185, state=SUCCESS; SnapshotRegionProcedure 08f57703150ab9ef20f2776475b12ad2 in 208 msec 2024-12-12T20:40:50,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742241_1417 (size=68) 2024-12-12T20:40:50,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742241_1417 (size=68) 2024-12-12T20:40:50,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742241_1417 (size=68) 2024-12-12T20:40:50,127 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8. 
2024-12-12T20:40:50,127 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=187 2024-12-12T20:40:50,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=187 2024-12-12T20:40:50,128 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 376feae6d5935ea89b8dc97bd6e805f8 2024-12-12T20:40:50,128 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 376feae6d5935ea89b8dc97bd6e805f8 2024-12-12T20:40:50,131 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=187, resume processing ppid=185 2024-12-12T20:40:50,131 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=187, ppid=185, state=SUCCESS; SnapshotRegionProcedure 376feae6d5935ea89b8dc97bd6e805f8 in 227 msec 2024-12-12T20:40:50,131 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T20:40:50,132 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T20:40:50,134 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T20:40:50,134 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-12-12T20:40:50,136 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-12-12T20:40:50,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742243_1419 (size=543) 2024-12-12T20:40:50,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742243_1419 (size=543) 2024-12-12T20:40:50,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742243_1419 (size=543) 2024-12-12T20:40:50,175 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T20:40:50,180 INFO [PEWorker-2 {}] 
procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T20:40:50,183 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-12-12T20:40:50,185 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T20:40:50,185 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 185 2024-12-12T20:40:50,187 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=185, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 297 msec 2024-12-12T20:40:50,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-12T20:40:50,191 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum, procId: 185 completed 2024-12-12T20:40:50,217 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42407 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T20:40:50,218 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46859 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T20:40:50,221 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithChecksum 2024-12-12T20:40:50,221 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2. 2024-12-12T20:40:50,222 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:40:50,247 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-12T20:40:50,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734036050247 (current time:1734036050247). 
2024-12-12T20:40:50,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T20:40:50,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-12T20:40:50,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-12T20:40:50,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x60719c99 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4131b6f 2024-12-12T20:40:50,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c0fe486, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:40:50,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:40:50,300 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50924, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:40:50,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x60719c99 to 127.0.0.1:52655 2024-12-12T20:40:50,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:40:50,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6b12076a to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@77c6b104 2024-12-12T20:40:50,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@445e50ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:40:50,355 DEBUG [hconnection-0x1dde0779-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:40:50,356 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50936, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:40:50,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6b12076a to 127.0.0.1:52655 2024-12-12T20:40:50,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:40:50,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-12T20:40:50,360 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-12T20:40:50,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=188, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-12T20:40:50,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 188 2024-12-12T20:40:50,362 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T20:40:50,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-12T20:40:50,363 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T20:40:50,370 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T20:40:50,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742244_1420 (size=156) 2024-12-12T20:40:50,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742244_1420 (size=156) 2024-12-12T20:40:50,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742244_1420 (size=156) 2024-12-12T20:40:50,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-12T20:40:50,466 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T20:40:50,466 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 08f57703150ab9ef20f2776475b12ad2}, {pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 376feae6d5935ea89b8dc97bd6e805f8}] 2024-12-12T20:40:50,468 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 376feae6d5935ea89b8dc97bd6e805f8 2024-12-12T20:40:50,468 INFO [PEWorker-4 {}] 
procedure.MasterProcedureScheduler(786): Took xlock for pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 08f57703150ab9ef20f2776475b12ad2 2024-12-12T20:40:50,620 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:40:50,620 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:40:50,622 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42407 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=190 2024-12-12T20:40:50,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8. 2024-12-12T20:40:50,623 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46859 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=189 2024-12-12T20:40:50,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2. 2024-12-12T20:40:50,623 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(2837): Flushing 08f57703150ab9ef20f2776475b12ad2 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-12-12T20:40:50,623 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(2837): Flushing 376feae6d5935ea89b8dc97bd6e805f8 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-12-12T20:40:50,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-12T20:40:50,695 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/08f57703150ab9ef20f2776475b12ad2/.tmp/cf/59c9d271eb3d49f9ade34794a3fc645f is 71, key is 0276fda9e00a3144c9f569fd99c6c407/cf:q/1734036050218/Put/seqid=0 2024-12-12T20:40:50,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/376feae6d5935ea89b8dc97bd6e805f8/.tmp/cf/13997210ed5a404ebb63ba0ce5f8491b is 71, key is 13df9e38679e8acb9502c69e1344d220/cf:q/1734036050217/Put/seqid=0 2024-12-12T20:40:50,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742245_1421 (size=5490) 2024-12-12T20:40:50,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742245_1421 (size=5490) 2024-12-12T20:40:50,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to 
blk_1073742245_1421 (size=5490) 2024-12-12T20:40:50,770 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=400 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/08f57703150ab9ef20f2776475b12ad2/.tmp/cf/59c9d271eb3d49f9ade34794a3fc645f 2024-12-12T20:40:50,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/08f57703150ab9ef20f2776475b12ad2/.tmp/cf/59c9d271eb3d49f9ade34794a3fc645f as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/08f57703150ab9ef20f2776475b12ad2/cf/59c9d271eb3d49f9ade34794a3fc645f 2024-12-12T20:40:50,789 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/08f57703150ab9ef20f2776475b12ad2/cf/59c9d271eb3d49f9ade34794a3fc645f, entries=6, sequenceid=6, filesize=5.4 K 2024-12-12T20:40:50,790 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(3040): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for 08f57703150ab9ef20f2776475b12ad2 in 167ms, sequenceid=6, compaction requested=false 2024-12-12T20:40:50,790 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(2538): Flush status journal for 08f57703150ab9ef20f2776475b12ad2: 2024-12-12T20:40:50,790 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2. for snaptb0-testExportWithChecksum completed. 2024-12-12T20:40:50,790 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-12T20:40:50,790 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:40:50,790 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/08f57703150ab9ef20f2776475b12ad2/cf/59c9d271eb3d49f9ade34794a3fc645f] hfiles 2024-12-12T20:40:50,790 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/08f57703150ab9ef20f2776475b12ad2/cf/59c9d271eb3d49f9ade34794a3fc645f for snapshot=snaptb0-testExportWithChecksum 2024-12-12T20:40:50,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742246_1422 (size=8120) 2024-12-12T20:40:50,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742246_1422 (size=8120) 2024-12-12T20:40:50,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742246_1422 (size=8120) 2024-12-12T20:40:50,803 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.87 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/376feae6d5935ea89b8dc97bd6e805f8/.tmp/cf/13997210ed5a404ebb63ba0ce5f8491b 2024-12-12T20:40:50,819 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/376feae6d5935ea89b8dc97bd6e805f8/.tmp/cf/13997210ed5a404ebb63ba0ce5f8491b as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/376feae6d5935ea89b8dc97bd6e805f8/cf/13997210ed5a404ebb63ba0ce5f8491b 2024-12-12T20:40:50,832 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/376feae6d5935ea89b8dc97bd6e805f8/cf/13997210ed5a404ebb63ba0ce5f8491b, entries=44, sequenceid=6, filesize=7.9 K 2024-12-12T20:40:50,836 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(3040): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for 376feae6d5935ea89b8dc97bd6e805f8 in 212ms, sequenceid=6, compaction requested=false 2024-12-12T20:40:50,836 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(2538): Flush status journal for 
376feae6d5935ea89b8dc97bd6e805f8: 2024-12-12T20:40:50,836 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8. for snaptb0-testExportWithChecksum completed. 2024-12-12T20:40:50,836 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-12T20:40:50,836 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:40:50,836 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/376feae6d5935ea89b8dc97bd6e805f8/cf/13997210ed5a404ebb63ba0ce5f8491b] hfiles 2024-12-12T20:40:50,836 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/376feae6d5935ea89b8dc97bd6e805f8/cf/13997210ed5a404ebb63ba0ce5f8491b for snapshot=snaptb0-testExportWithChecksum 2024-12-12T20:40:50,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742247_1423 (size=107) 2024-12-12T20:40:50,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742247_1423 (size=107) 2024-12-12T20:40:50,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742247_1423 (size=107) 2024-12-12T20:40:50,867 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2. 
2024-12-12T20:40:50,867 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=189 2024-12-12T20:40:50,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=189 2024-12-12T20:40:50,868 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 08f57703150ab9ef20f2776475b12ad2 2024-12-12T20:40:50,868 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 08f57703150ab9ef20f2776475b12ad2 2024-12-12T20:40:50,872 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=189, ppid=188, state=SUCCESS; SnapshotRegionProcedure 08f57703150ab9ef20f2776475b12ad2 in 402 msec 2024-12-12T20:40:50,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742248_1424 (size=107) 2024-12-12T20:40:50,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742248_1424 (size=107) 2024-12-12T20:40:50,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742248_1424 (size=107) 2024-12-12T20:40:50,922 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8. 2024-12-12T20:40:50,922 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=190 2024-12-12T20:40:50,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=190 2024-12-12T20:40:50,922 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 376feae6d5935ea89b8dc97bd6e805f8 2024-12-12T20:40:50,922 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 376feae6d5935ea89b8dc97bd6e805f8 2024-12-12T20:40:50,926 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=190, resume processing ppid=188 2024-12-12T20:40:50,926 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T20:40:50,926 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=190, ppid=188, state=SUCCESS; SnapshotRegionProcedure 376feae6d5935ea89b8dc97bd6e805f8 in 458 msec 2024-12-12T20:40:50,929 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH 
ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T20:40:50,930 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T20:40:50,931 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-12-12T20:40:50,931 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-12T20:40:50,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-12T20:40:50,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742249_1425 (size=621) 2024-12-12T20:40:50,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742249_1425 (size=621) 2024-12-12T20:40:50,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742249_1425 (size=621) 2024-12-12T20:40:50,989 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T20:40:50,995 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T20:40:50,995 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-12T20:40:51,003 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T20:40:51,004 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 188 2024-12-12T20:40:51,005 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=188, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 644 msec 2024-12-12T20:40:51,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): 
Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-12T20:40:51,126 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-12-12T20:40:51,127 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-12T20:40:51,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-12T20:40:51,470 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum, procId: 188 completed 2024-12-12T20:40:51,470 INFO [Time-limited test {}] snapshot.TestExportSnapshot(476): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734036051470 2024-12-12T20:40:51,470 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734036051470, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734036051470, srcFsUri=hdfs://localhost:37615, srcDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:40:51,513 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:37615, inputRoot=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:40:51,513 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@1c2405a5, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734036051470, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734036051470/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-12T20:40:51,515 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 
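
[Editorial aside] The records just above show snapshot snaptb0-testExportWithChecksum completing (procId 188) and ExportSnapshot being pointed at an HDFS source (hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427) and a local file: destination. As a minimal sketch only, not the test's own code, the same kind of export can be driven through ToolRunner (the mechanism the failure stack trace later shows); the destination path and class name below are assumptions, the snapshot name and source filesystem are the ones logged here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

// Illustrative sketch: export the snapshot named in this log from the HDFS
// source cluster to a local file:// destination, the same shape of run that
// the ExportSnapshot(1082/1083) records above describe.
public class ExportSnapshotToLocalSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Source filesystem as logged; the copy-to path is an assumption.
    conf.set("fs.defaultFS", "hdfs://localhost:37615");
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",
        "-copy-to", "file:///tmp/local-export-example"
    });
    System.exit(rc);
  }
}
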
2024-12-12T20:40:51,540 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734036051470/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-12T20:40:51,699 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-12T20:40:51,700 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-12T20:40:51,700 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-12T20:40:51,700 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-12T20:40:53,017 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop-263564344920866218.jar 2024-12-12T20:40:53,018 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-12T20:40:53,018 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-12T20:40:53,101 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop-5022498872511187384.jar 2024-12-12T20:40:53,101 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-12T20:40:53,102 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-12T20:40:53,102 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-12T20:40:53,102 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-12T20:40:53,103 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-12T20:40:53,103 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-12T20:40:53,103 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-12T20:40:53,104 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-12T20:40:53,104 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-12T20:40:53,105 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-12T20:40:53,105 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-12T20:40:53,105 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-12T20:40:53,106 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-12T20:40:53,106 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-12T20:40:53,106 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-12T20:40:53,107 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-12T20:40:53,107 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-12T20:40:53,107 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-12T20:40:53,108 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:40:53,108 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:40:53,109 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:40:53,109 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:40:53,109 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:40:53,110 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:40:53,110 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:40:53,199 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0007_000001 (auth:SIMPLE) from 127.0.0.1:52978 2024-12-12T20:40:53,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:32781 is added to blk_1073742250_1426 (size=6350857) 2024-12-12T20:40:53,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742250_1426 (size=6350857) 2024-12-12T20:40:53,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742250_1426 (size=6350857) 2024-12-12T20:40:53,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742251_1427 (size=127628) 2024-12-12T20:40:53,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742251_1427 (size=127628) 2024-12-12T20:40:53,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742251_1427 (size=127628) 2024-12-12T20:40:53,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742252_1428 (size=2172101) 2024-12-12T20:40:53,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742252_1428 (size=2172101) 2024-12-12T20:40:53,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742252_1428 (size=2172101) 2024-12-12T20:40:53,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742253_1429 (size=213228) 2024-12-12T20:40:53,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742253_1429 (size=213228) 2024-12-12T20:40:53,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742253_1429 (size=213228) 2024-12-12T20:40:53,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742254_1430 (size=1877034) 2024-12-12T20:40:53,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742254_1430 (size=1877034) 2024-12-12T20:40:53,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742254_1430 (size=1877034) 2024-12-12T20:40:53,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742255_1431 (size=451756) 2024-12-12T20:40:53,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742255_1431 (size=451756) 2024-12-12T20:40:53,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742255_1431 (size=451756) 2024-12-12T20:40:53,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742256_1432 (size=533455) 2024-12-12T20:40:53,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742256_1432 (size=533455) 2024-12-12T20:40:53,379 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742256_1432 (size=533455) 2024-12-12T20:40:53,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742257_1433 (size=7280644) 2024-12-12T20:40:53,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742257_1433 (size=7280644) 2024-12-12T20:40:53,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742257_1433 (size=7280644) 2024-12-12T20:40:53,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742258_1434 (size=4188619) 2024-12-12T20:40:53,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742258_1434 (size=4188619) 2024-12-12T20:40:53,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742258_1434 (size=4188619) 2024-12-12T20:40:53,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742259_1435 (size=20406) 2024-12-12T20:40:53,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742259_1435 (size=20406) 2024-12-12T20:40:53,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742259_1435 (size=20406) 2024-12-12T20:40:53,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742260_1436 (size=75495) 2024-12-12T20:40:53,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742260_1436 (size=75495) 2024-12-12T20:40:53,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742260_1436 (size=75495) 2024-12-12T20:40:53,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742261_1437 (size=45609) 2024-12-12T20:40:53,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742261_1437 (size=45609) 2024-12-12T20:40:53,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742261_1437 (size=45609) 2024-12-12T20:40:53,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742262_1438 (size=110084) 2024-12-12T20:40:53,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742262_1438 (size=110084) 2024-12-12T20:40:53,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742262_1438 (size=110084) 2024-12-12T20:40:53,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742263_1439 (size=1323991) 2024-12-12T20:40:53,473 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742263_1439 (size=1323991) 2024-12-12T20:40:53,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742263_1439 (size=1323991) 2024-12-12T20:40:53,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742264_1440 (size=23076) 2024-12-12T20:40:53,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742264_1440 (size=23076) 2024-12-12T20:40:53,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742264_1440 (size=23076) 2024-12-12T20:40:53,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742265_1441 (size=126803) 2024-12-12T20:40:53,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742265_1441 (size=126803) 2024-12-12T20:40:53,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742265_1441 (size=126803) 2024-12-12T20:40:53,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742266_1442 (size=322274) 2024-12-12T20:40:53,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742266_1442 (size=322274) 2024-12-12T20:40:53,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742266_1442 (size=322274) 2024-12-12T20:40:53,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742267_1443 (size=1832290) 2024-12-12T20:40:53,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742267_1443 (size=1832290) 2024-12-12T20:40:53,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742267_1443 (size=1832290) 2024-12-12T20:40:53,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742268_1444 (size=30081) 2024-12-12T20:40:53,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742268_1444 (size=30081) 2024-12-12T20:40:53,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742268_1444 (size=30081) 2024-12-12T20:40:53,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742269_1445 (size=53616) 2024-12-12T20:40:53,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742269_1445 (size=53616) 2024-12-12T20:40:53,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742269_1445 (size=53616) 2024-12-12T20:40:53,535 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742270_1446 (size=29229) 2024-12-12T20:40:53,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742270_1446 (size=29229) 2024-12-12T20:40:53,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742270_1446 (size=29229) 2024-12-12T20:40:53,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742271_1447 (size=169089) 2024-12-12T20:40:53,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742271_1447 (size=169089) 2024-12-12T20:40:53,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742271_1447 (size=169089) 2024-12-12T20:40:53,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742272_1448 (size=5175431) 2024-12-12T20:40:53,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742272_1448 (size=5175431) 2024-12-12T20:40:53,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742272_1448 (size=5175431) 2024-12-12T20:40:53,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742273_1449 (size=136454) 2024-12-12T20:40:53,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742273_1449 (size=136454) 2024-12-12T20:40:53,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742273_1449 (size=136454) 2024-12-12T20:40:53,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742274_1450 (size=907861) 2024-12-12T20:40:53,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742274_1450 (size=907861) 2024-12-12T20:40:53,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742274_1450 (size=907861) 2024-12-12T20:40:53,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742275_1451 (size=3317408) 2024-12-12T20:40:53,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742275_1451 (size=3317408) 2024-12-12T20:40:53,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742275_1451 (size=3317408) 2024-12-12T20:40:53,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742276_1452 (size=503880) 2024-12-12T20:40:53,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742276_1452 (size=503880) 2024-12-12T20:40:53,671 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742276_1452 (size=503880) 2024-12-12T20:40:53,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742277_1453 (size=4695811) 2024-12-12T20:40:53,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742277_1453 (size=4695811) 2024-12-12T20:40:53,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742277_1453 (size=4695811) 2024-12-12T20:40:53,721 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-12T20:40:53,724 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-12T20:40:53,726 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-12T20:40:53,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742278_1454 (size=338) 2024-12-12T20:40:53,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742278_1454 (size=338) 2024-12-12T20:40:53,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742278_1454 (size=338) 2024-12-12T20:40:53,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742279_1455 (size=15) 2024-12-12T20:40:53,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742279_1455 (size=15) 2024-12-12T20:40:53,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742279_1455 (size=15) 2024-12-12T20:40:53,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742280_1456 (size=304927) 2024-12-12T20:40:53,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742280_1456 (size=304927) 2024-12-12T20:40:53,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742280_1456 (size=304927) 2024-12-12T20:40:53,805 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T20:40:53,805 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-12T20:40:54,117 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0008_000001 (auth:SIMPLE) from 127.0.0.1:55800 2024-12-12T20:40:54,435 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T20:40:58,308 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_1/usercache/jenkins/appcache/application_1734035798001_0007/container_1734035798001_0007_01_000001/launch_container.sh] 2024-12-12T20:40:58,308 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_1/usercache/jenkins/appcache/application_1734035798001_0007/container_1734035798001_0007_01_000001/container_tokens] 2024-12-12T20:40:58,309 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_1/usercache/jenkins/appcache/application_1734035798001_0007/container_1734035798001_0007_01_000001/sysfs] 2024-12-12T20:41:00,217 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0008_000001 (auth:SIMPLE) from 127.0.0.1:47772 2024-12-12T20:41:00,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742281_1457 (size=350601) 2024-12-12T20:41:00,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742281_1457 (size=350601) 2024-12-12T20:41:00,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742281_1457 (size=350601) 2024-12-12T20:41:02,446 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0008_000001 (auth:SIMPLE) from 127.0.0.1:42730 2024-12-12T20:41:05,861 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region d5a17aa0c520a1cdf2abb50af9af5441, had cached 0 bytes from a total of 8324 2024-12-12T20:41:05,880 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1c36c2e04ccc3d3187435c7e0ba7d6c6, had cached 0 bytes from a total of 5288 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/376feae6d5935ea89b8dc97bd6e805f8/cf/13997210ed5a404ebb63ba0ce5f8491b and 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734036051470/archive/data/default/testtb-testExportWithChecksum/376feae6d5935ea89b8dc97bd6e805f8/cf/13997210ed5a404ebb63ba0ce5f8491b. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-12T20:41:07,342 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0008_000001 (auth:SIMPLE) from 127.0.0.1:42744 2024-12-12T20:41:11,590 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_0/usercache/jenkins/appcache/application_1734035798001_0008/container_1734035798001_0008_01_000002/launch_container.sh] 2024-12-12T20:41:11,591 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_0/usercache/jenkins/appcache/application_1734035798001_0008/container_1734035798001_0008_01_000002/container_tokens] 2024-12-12T20:41:11,591 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_0/usercache/jenkins/appcache/application_1734035798001_0008/container_1734035798001_0008_01_000002/sysfs] 2024-12-12T20:41:11,868 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_1/usercache/jenkins/appcache/application_1734035798001_0008/container_1734035798001_0008_01_000003/launch_container.sh] 2024-12-12T20:41:11,868 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_1/usercache/jenkins/appcache/application_1734035798001_0008/container_1734035798001_0008_01_000003/container_tokens] 2024-12-12T20:41:11,868 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_1/usercache/jenkins/appcache/application_1734035798001_0008/container_1734035798001_0008_01_000003/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/376feae6d5935ea89b8dc97bd6e805f8/cf/13997210ed5a404ebb63ba0ce5f8491b and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734036051470/archive/data/default/testtb-testExportWithChecksum/376feae6d5935ea89b8dc97bd6e805f8/cf/13997210ed5a404ebb63ba0ce5f8491b. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-12T20:41:13,362 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0008_000001 (auth:SIMPLE) from 127.0.0.1:42860 2024-12-12T20:41:17,145 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_1/usercache/jenkins/appcache/application_1734035798001_0008/container_1734035798001_0008_01_000004/launch_container.sh] 2024-12-12T20:41:17,145 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_1/usercache/jenkins/appcache/application_1734035798001_0008/container_1734035798001_0008_01_000004/container_tokens] 2024-12-12T20:41:17,145 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_1/usercache/jenkins/appcache/application_1734035798001_0008/container_1734035798001_0008_01_000004/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/376feae6d5935ea89b8dc97bd6e805f8/cf/13997210ed5a404ebb63ba0ce5f8491b and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/local-export-1734036051470/archive/data/default/testtb-testExportWithChecksum/376feae6d5935ea89b8dc97bd6e805f8/cf/13997210ed5a404ebb63ba0ce5f8491b. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. 
Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-12T20:41:18,385 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0008_000001 (auth:SIMPLE) from 127.0.0.1:42874 2024-12-12T20:41:19,301 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T20:41:21,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742282_1458 (size=21340) 2024-12-12T20:41:21,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742282_1458 (size=21340) 2024-12-12T20:41:21,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742282_1458 (size=21340) 2024-12-12T20:41:21,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742283_1459 (size=460) 2024-12-12T20:41:21,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742283_1459 (size=460) 2024-12-12T20:41:21,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742283_1459 (size=460) 2024-12-12T20:41:21,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742284_1460 (size=21340) 2024-12-12T20:41:21,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742284_1460 (size=21340) 2024-12-12T20:41:21,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742284_1460 (size=21340) 2024-12-12T20:41:21,773 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-12T20:41:21,867 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742285_1461 (size=350601) 2024-12-12T20:41:21,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742285_1461 (size=350601) 2024-12-12T20:41:21,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742285_1461 (size=350601) 2024-12-12T20:41:21,894 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0008_000001 (auth:SIMPLE) from 127.0.0.1:42548 2024-12-12T20:41:22,060 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=662.40 KB, freeSize=879.35 MB, max=880 MB, blockCount=2, accesses=2, hits=0, hitRatio=0, cachingAccesses=2, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-12T20:41:22,418 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-12T20:41:22,680 DEBUG [master/0a737713f6e3:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=2, created chunk count=10, reused chunk count=20, reuseRatio=66.67% 2024-12-12T20:41:22,681 DEBUG [master/0a737713f6e3:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-12T20:41:23,069 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1227): Snapshot export failed org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1734035798001_0008_m_000000 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0 at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:935) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1204) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:151) ~[hbase-common-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:523) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:353) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:237) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
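
[Editorial aside] The repeated "Checksum mismatch" errors above (one per failed map attempt) state the cause: the input filesystem is HDFS and the output is a local filesystem, so their default checksum algorithms are not comparable, and the export job ultimately fails with ExportSnapshotException. The error message itself names two ways out: file-level CRC comparison via dfs.checksum.combine.mode=COMPOSITE_CRC, or skipping verification with -no-checksum-verify. The sketch below only restates those two options from the message; the destination path and class name are assumptions, not anything run in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

// Sketch of the remedies named in the checksum-mismatch error above.
public class ExportSnapshotChecksumRemedySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("fs.defaultFS", "hdfs://localhost:37615");      // source as logged
    // Option 1 from the error text: compare file-level composite CRCs.
    conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",
        "-copy-to", "file:///tmp/local-export-example"       // assumed destination
        // Option 2 from the error text: add "-no-checksum-verify" instead,
        // accepting the risk of masking corruption during the copy.
    });
    System.exit(rc);
  }
}
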
2024-12-12T20:41:23,070 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036083070 2024-12-12T20:41:23,070 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:37615, tgtDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036083070, rawTgtDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036083070, srcFsUri=hdfs://localhost:37615, srcDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:41:23,108 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:37615, inputRoot=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:41:23,108 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1165987477_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036083070, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036083070/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-12T20:41:23,110 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-12T20:41:23,116 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036083070/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-12T20:41:23,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742286_1462 (size=156) 2024-12-12T20:41:23,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742286_1462 (size=156) 2024-12-12T20:41:23,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742286_1462 (size=156) 2024-12-12T20:41:23,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742287_1463 (size=621) 2024-12-12T20:41:23,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742287_1463 (size=621) 2024-12-12T20:41:23,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742287_1463 (size=621) 2024-12-12T20:41:23,157 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-12T20:41:23,157 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-12T20:41:23,158 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-12T20:41:23,158 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-12T20:41:24,138 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop-12554398768716482677.jar 2024-12-12T20:41:24,138 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-12T20:41:24,138 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-12T20:41:24,204 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop-17023366335561483498.jar 2024-12-12T20:41:24,204 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-12T20:41:24,204 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-12T20:41:24,205 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-12T20:41:24,205 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-12T20:41:24,205 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-12T20:41:24,205 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 
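
[Editorial aside] The long runs of TableMapReduceUtil(923) "For class ..., using jar ..." records, here and earlier in the log, are the dependency-shipping step: for each class the export job needs, a matching jar is located and attached to the MapReduce job. A hedged sketch of one public entry point to that same mechanism follows; the job name and class name are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

// Sketch only: trigger the same per-class jar resolution that the surrounding
// "For class ..., using jar ..." records show.
public class ShipDependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "snapshot-export-dependency-demo"); // name assumed
    // Finds (or builds) a jar for each required class and adds it to the
    // job's classpath via the distributed cache.
    TableMapReduceUtil.addDependencyJars(job);
  }
}
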
2024-12-12T20:41:24,206 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-12T20:41:24,206 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-12T20:41:24,206 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-12T20:41:24,206 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-12T20:41:24,207 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-12T20:41:24,207 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-12T20:41:24,207 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-12T20:41:24,207 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-12T20:41:24,208 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-12T20:41:24,208 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-12T20:41:24,208 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-12T20:41:24,208 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-12T20:41:24,208 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:41:24,209 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:41:24,209 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:41:24,209 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:41:24,209 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:41:24,209 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:41:24,209 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:41:24,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742288_1464 (size=127628) 2024-12-12T20:41:24,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742288_1464 (size=127628) 2024-12-12T20:41:24,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742288_1464 (size=127628) 2024-12-12T20:41:24,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742289_1465 (size=2172101) 2024-12-12T20:41:24,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742289_1465 (size=2172101) 2024-12-12T20:41:24,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742289_1465 (size=2172101) 2024-12-12T20:41:24,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is 
added to blk_1073742290_1466 (size=213228) 2024-12-12T20:41:24,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742290_1466 (size=213228) 2024-12-12T20:41:24,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742290_1466 (size=213228) 2024-12-12T20:41:24,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742291_1467 (size=1877034) 2024-12-12T20:41:24,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742291_1467 (size=1877034) 2024-12-12T20:41:24,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742291_1467 (size=1877034) 2024-12-12T20:41:24,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742292_1468 (size=533455) 2024-12-12T20:41:24,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742292_1468 (size=533455) 2024-12-12T20:41:24,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742292_1468 (size=533455) 2024-12-12T20:41:25,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742293_1469 (size=7280644) 2024-12-12T20:41:25,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742293_1469 (size=7280644) 2024-12-12T20:41:25,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742293_1469 (size=7280644) 2024-12-12T20:41:25,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742294_1470 (size=4188619) 2024-12-12T20:41:25,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742294_1470 (size=4188619) 2024-12-12T20:41:25,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742294_1470 (size=4188619) 2024-12-12T20:41:25,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742295_1471 (size=20406) 2024-12-12T20:41:25,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742295_1471 (size=20406) 2024-12-12T20:41:25,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742295_1471 (size=20406) 2024-12-12T20:41:25,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742296_1472 (size=75495) 2024-12-12T20:41:25,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742296_1472 (size=75495) 2024-12-12T20:41:25,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:32781 is added to blk_1073742296_1472 (size=75495) 2024-12-12T20:41:25,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742297_1473 (size=45609) 2024-12-12T20:41:25,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742297_1473 (size=45609) 2024-12-12T20:41:25,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742297_1473 (size=45609) 2024-12-12T20:41:25,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742298_1474 (size=110084) 2024-12-12T20:41:25,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742298_1474 (size=110084) 2024-12-12T20:41:25,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742298_1474 (size=110084) 2024-12-12T20:41:25,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742299_1475 (size=1323991) 2024-12-12T20:41:25,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742299_1475 (size=1323991) 2024-12-12T20:41:25,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742299_1475 (size=1323991) 2024-12-12T20:41:25,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742300_1476 (size=23076) 2024-12-12T20:41:25,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742300_1476 (size=23076) 2024-12-12T20:41:25,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742300_1476 (size=23076) 2024-12-12T20:41:25,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742301_1477 (size=126803) 2024-12-12T20:41:25,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742301_1477 (size=126803) 2024-12-12T20:41:25,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742301_1477 (size=126803) 2024-12-12T20:41:25,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742302_1478 (size=322274) 2024-12-12T20:41:25,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742302_1478 (size=322274) 2024-12-12T20:41:25,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742302_1478 (size=322274) 2024-12-12T20:41:25,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742303_1479 (size=1832290) 2024-12-12T20:41:25,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:36349 is added to blk_1073742303_1479 (size=1832290) 2024-12-12T20:41:25,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742303_1479 (size=1832290) 2024-12-12T20:41:25,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742304_1480 (size=30081) 2024-12-12T20:41:25,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742304_1480 (size=30081) 2024-12-12T20:41:25,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742304_1480 (size=30081) 2024-12-12T20:41:25,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742305_1481 (size=53616) 2024-12-12T20:41:25,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742305_1481 (size=53616) 2024-12-12T20:41:25,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742305_1481 (size=53616) 2024-12-12T20:41:25,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742306_1482 (size=29229) 2024-12-12T20:41:25,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742306_1482 (size=29229) 2024-12-12T20:41:25,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742306_1482 (size=29229) 2024-12-12T20:41:25,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742307_1483 (size=169089) 2024-12-12T20:41:25,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742307_1483 (size=169089) 2024-12-12T20:41:25,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742307_1483 (size=169089) 2024-12-12T20:41:25,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742308_1484 (size=5175431) 2024-12-12T20:41:25,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742308_1484 (size=5175431) 2024-12-12T20:41:25,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742308_1484 (size=5175431) 2024-12-12T20:41:25,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742309_1485 (size=136454) 2024-12-12T20:41:25,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742309_1485 (size=136454) 2024-12-12T20:41:25,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742309_1485 (size=136454) 2024-12-12T20:41:25,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742310_1486 (size=907861) 2024-12-12T20:41:25,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742310_1486 (size=907861) 2024-12-12T20:41:25,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742310_1486 (size=907861) 2024-12-12T20:41:25,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742311_1487 (size=3317408) 2024-12-12T20:41:25,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742311_1487 (size=3317408) 2024-12-12T20:41:25,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742311_1487 (size=3317408) 2024-12-12T20:41:25,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742312_1488 (size=6350857) 2024-12-12T20:41:25,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742312_1488 (size=6350857) 2024-12-12T20:41:25,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742312_1488 (size=6350857) 2024-12-12T20:41:25,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742313_1489 (size=451756) 2024-12-12T20:41:25,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742313_1489 (size=451756) 2024-12-12T20:41:25,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742313_1489 (size=451756) 2024-12-12T20:41:25,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742314_1490 (size=503880) 2024-12-12T20:41:25,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742314_1490 (size=503880) 2024-12-12T20:41:25,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742314_1490 (size=503880) 2024-12-12T20:41:25,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742315_1491 (size=4695811) 2024-12-12T20:41:25,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742315_1491 (size=4695811) 2024-12-12T20:41:25,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742315_1491 (size=4695811) 2024-12-12T20:41:25,540 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-12T20:41:25,544 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-12T20:41:25,546 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-12T20:41:25,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742316_1492 (size=338) 2024-12-12T20:41:25,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742316_1492 (size=338) 2024-12-12T20:41:25,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742316_1492 (size=338) 2024-12-12T20:41:25,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742317_1493 (size=15) 2024-12-12T20:41:25,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742317_1493 (size=15) 2024-12-12T20:41:25,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742317_1493 (size=15) 2024-12-12T20:41:25,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742318_1494 (size=304883) 2024-12-12T20:41:25,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742318_1494 (size=304883) 2024-12-12T20:41:25,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742318_1494 (size=304883) 2024-12-12T20:41:26,166 INFO [0a737713f6e3:42407Replication Statistics #0 {}] regionserver.Replication$ReplicationStatisticsTask(247): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-12T20:41:26,179 INFO [0a737713f6e3:36817Replication Statistics #0 {}] regionserver.Replication$ReplicationStatisticsTask(247): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-12T20:41:26,180 INFO [0a737713f6e3:46859Replication Statistics #0 {}] regionserver.Replication$ReplicationStatisticsTask(247): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-12T20:41:26,773 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_3/usercache/jenkins/appcache/application_1734035798001_0008/container_1734035798001_0008_01_000005/launch_container.sh] 2024-12-12T20:41:26,773 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_3/usercache/jenkins/appcache/application_1734035798001_0008/container_1734035798001_0008_01_000005/container_tokens] 2024-12-12T20:41:26,773 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_3/usercache/jenkins/appcache/application_1734035798001_0008/container_1734035798001_0008_01_000005/sysfs] 2024-12-12T20:41:27,770 DEBUG [master/0a737713f6e3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 376feae6d5935ea89b8dc97bd6e805f8 changed from -1.0 to 0.0, refreshing cache 2024-12-12T20:41:27,771 DEBUG [master/0a737713f6e3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 08f57703150ab9ef20f2776475b12ad2 changed from -1.0 to 0.0, refreshing cache 2024-12-12T20:41:27,778 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testtb-testExportWithChecksum because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-12T20:41:27,778 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testExportExpiredSnapshot because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-12T20:41:27,795 DEBUG [master/0a737713f6e3:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-12T20:41:27,797 DEBUG [master/0a737713f6e3:0.Chore.1 {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-12-12T20:41:27,798 DEBUG [master/0a737713f6e3:0.Chore.1 {}] balancer.BalancerClusterState(202): Hosts are {0a737713f6e3=0} racks are {/default-rack=0} 2024-12-12T20:41:27,799 DEBUG [master/0a737713f6e3:0.Chore.1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-12T20:41:27,799 DEBUG [master/0a737713f6e3:0.Chore.1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-12T20:41:27,799 DEBUG [master/0a737713f6e3:0.Chore.1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-12T20:41:27,799 INFO [master/0a737713f6e3:0.Chore.1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-12T20:41:27,799 INFO [master/0a737713f6e3:0.Chore.1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-12T20:41:27,799 INFO [master/0a737713f6e3:0.Chore.1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-12T20:41:27,799 DEBUG [master/0a737713f6e3:0.Chore.1 {}] balancer.BalancerClusterState(319): Number of tables=5, number of hosts=1, number of racks=1 2024-12-12T20:41:27,804 INFO [master/0a737713f6e3:0.Chore.1 {}] balancer.StochasticLoadBalancer(391): Running balancer because cluster has sloppy server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.27429188517743175, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); WriteRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-12-12T20:41:27,804 INFO [master/0a737713f6e3:0.Chore.1 {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.24442847606098983, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.27429188517743175, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); WriteRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=16800 2024-12-12T20:41:28,052 INFO [master/0a737713f6e3:0.Chore.1 {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 252 ms to try 16800 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.24442847606098983 to a new imbalance of 0.015876555685201695. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.14285714285714285, need balance); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.8406401015194013, need balance); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8708972029788825, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-12-12T20:41:28,052 INFO [master/0a737713f6e3:0.Chore.1 {}] master.HMaster(2108): Balancer plans size is 1, the balance interval is 300000 ms, and the max number regions in transition is 7 2024-12-12T20:41:28,052 INFO [master/0a737713f6e3:0.Chore.1 {}] master.HMaster(2113): balance hri=039d8ca4d0468f0362e2baff27ee7042, source=0a737713f6e3,46859,1734035782031, destination=0a737713f6e3,36817,1734035782356 2024-12-12T20:41:28,053 DEBUG [master/0a737713f6e3:0.Chore.1 {}] procedure2.ProcedureExecutor(1098): Stored pid=191, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=hbase:acl, region=039d8ca4d0468f0362e2baff27ee7042, REOPEN/MOVE 2024-12-12T20:41:28,053 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=191, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=hbase:acl, region=039d8ca4d0468f0362e2baff27ee7042, REOPEN/MOVE 2024-12-12T20:41:28,055 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=191 updating hbase:meta row=039d8ca4d0468f0362e2baff27ee7042, regionState=CLOSING, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:41:28,057 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T20:41:28,057 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=192, ppid=191, state=RUNNABLE; CloseRegionProcedure 039d8ca4d0468f0362e2baff27ee7042, server=0a737713f6e3,46859,1734035782031}] 2024-12-12T20:41:28,209 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:41:28,213 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(124): Close 039d8ca4d0468f0362e2baff27ee7042 2024-12-12T20:41:28,213 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T20:41:28,215 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1681): Closing 039d8ca4d0468f0362e2baff27ee7042, disabling compactions & flushes 2024-12-12T20:41:28,215 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1703): Closing region hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042. 
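As a cross-check of the StochasticLoadBalancer figures above (this is arithmetic on the logged numbers, not a statement of the balancer's exact internal formula): treating the reported value as a multiplier-weighted mean over the cost functions that participate (the ones marked "not needed" excluded), the initial imbalance is (500 × 0.27429188517743175 + 5 × 1.0 + 5 × 1.0) / (500 + 7 + 25 + 15 + 35 + 5 + 5 + 5 + 5) = 147.14594... / 602 ≈ 0.24442848, and the post-plan costs give (7 × 0.14285714285714285 + 5 × 0.8406401015194013 + 5 × 0.8708972029788825) / 602 ≈ 0.01587656, matching the reported "new imbalance". The single move it found is the REOPEN/MOVE of hbase:acl region 039d8ca4d0468f0362e2baff27ee7042 executed in the records that follow.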
2024-12-12T20:41:28,215 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042. 2024-12-12T20:41:28,215 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1791): Acquired close lock on hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042. after waiting 0 ms 2024-12-12T20:41:28,215 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1801): Updates disabled for region hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042. 2024-12-12T20:41:28,216 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(2837): Flushing 039d8ca4d0468f0362e2baff27ee7042 1/1 column families, dataSize=1.19 KB heapSize=2.92 KB 2024-12-12T20:41:28,251 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/acl/039d8ca4d0468f0362e2baff27ee7042/.tmp/l/cce449b2c5a444c999ad9ea0fe2d9421 is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1734036017159/DeleteFamily/seqid=0 2024-12-12T20:41:28,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742319_1495 (size=5642) 2024-12-12T20:41:28,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742319_1495 (size=5642) 2024-12-12T20:41:28,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742319_1495 (size=5642) 2024-12-12T20:41:28,359 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.19 KB at sequenceid=24 (bloomFilter=false), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/acl/039d8ca4d0468f0362e2baff27ee7042/.tmp/l/cce449b2c5a444c999ad9ea0fe2d9421 2024-12-12T20:41:28,365 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for cce449b2c5a444c999ad9ea0fe2d9421 2024-12-12T20:41:28,368 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/acl/039d8ca4d0468f0362e2baff27ee7042/.tmp/l/cce449b2c5a444c999ad9ea0fe2d9421 as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/acl/039d8ca4d0468f0362e2baff27ee7042/l/cce449b2c5a444c999ad9ea0fe2d9421 2024-12-12T20:41:28,379 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for cce449b2c5a444c999ad9ea0fe2d9421 2024-12-12T20:41:28,379 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/acl/039d8ca4d0468f0362e2baff27ee7042/l/cce449b2c5a444c999ad9ea0fe2d9421, entries=11, sequenceid=24, filesize=5.5 K 2024-12-12T20:41:28,384 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(3040): Finished flush of dataSize ~1.19 KB/1222, heapSize ~2.91 KB/2976, currentSize=0 B/0 for 039d8ca4d0468f0362e2baff27ee7042 in 169ms, sequenceid=24, compaction requested=false 2024-12-12T20:41:28,425 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/acl/039d8ca4d0468f0362e2baff27ee7042/recovered.edits/27.seqid, newMaxSeqId=27, maxSeqId=1 2024-12-12T20:41:28,428 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:41:28,428 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1922): Closed hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042. 2024-12-12T20:41:28,428 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1635): Region close journal for 039d8ca4d0468f0362e2baff27ee7042: 2024-12-12T20:41:28,428 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegionServer(3789): Adding 039d8ca4d0468f0362e2baff27ee7042 move to 0a737713f6e3,36817,1734035782356 record at close sequenceid=24 2024-12-12T20:41:28,439 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(170): Closed 039d8ca4d0468f0362e2baff27ee7042 2024-12-12T20:41:28,444 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=191 updating hbase:meta row=039d8ca4d0468f0362e2baff27ee7042, regionState=CLOSED 2024-12-12T20:41:28,483 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=192, resume processing ppid=191 2024-12-12T20:41:28,483 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=192, ppid=191, state=SUCCESS; CloseRegionProcedure 039d8ca4d0468f0362e2baff27ee7042, server=0a737713f6e3,46859,1734035782031 in 395 msec 2024-12-12T20:41:28,491 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=191, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:acl, region=039d8ca4d0468f0362e2baff27ee7042, REOPEN/MOVE; state=CLOSED, location=0a737713f6e3,36817,1734035782356; forceNewPlan=false, retain=false 2024-12-12T20:41:28,642 INFO [0a737713f6e3:43067 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 
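Procedures pid=191/192/193 around this point are that single balancer move in action: close 039d8ca4d0468f0362e2baff27ee7042 (hbase:acl) on 0a737713f6e3,46859,1734035782031 (flushing its memstore first), then reassign and reopen it on 0a737713f6e3,36817,1734035782356. The same transition can be requested directly through the Admin API; a sketch assuming an HBase 2.x client (connection settings are defaults, and the Admin#move overloads differ slightly across 2.x releases):

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.ServerName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.util.Bytes;

  public class RegionMoveSketch {
    public static void main(String[] args) throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
           Admin admin = conn.getAdmin()) {
        // Encoded region name as it appears in the log; the destination is the
        // server the balancer chose, in host,port,startcode form.
        byte[] encodedRegion = Bytes.toBytes("039d8ca4d0468f0362e2baff27ee7042");
        ServerName dest = ServerName.valueOf("0a737713f6e3,36817,1734035782356");
        // Asks the master to run the same close/assign/open sequence logged here.
        admin.move(encodedRegion, dest);
      }
    }
  }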
2024-12-12T20:41:28,642 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=191 updating hbase:meta row=039d8ca4d0468f0362e2baff27ee7042, regionState=OPENING, regionLocation=0a737713f6e3,36817,1734035782356 2024-12-12T20:41:28,645 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=193, ppid=191, state=RUNNABLE; OpenRegionProcedure 039d8ca4d0468f0362e2baff27ee7042, server=0a737713f6e3,36817,1734035782356}] 2024-12-12T20:41:28,798 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:41:28,825 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=193}] handler.AssignRegionHandler(135): Open hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042. 2024-12-12T20:41:28,825 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=193}] regionserver.HRegion(7285): Opening region: {ENCODED => 039d8ca4d0468f0362e2baff27ee7042, NAME => 'hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042.', STARTKEY => '', ENDKEY => ''} 2024-12-12T20:41:28,826 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=193}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042. service=AccessControlService 2024-12-12T20:41:28,826 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=193}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T20:41:28,827 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=193}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 039d8ca4d0468f0362e2baff27ee7042 2024-12-12T20:41:28,827 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=193}] regionserver.HRegion(894): Instantiated hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:41:28,827 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=193}] regionserver.HRegion(7327): checking encryption for 039d8ca4d0468f0362e2baff27ee7042 2024-12-12T20:41:28,827 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=193}] regionserver.HRegion(7330): checking classloading for 039d8ca4d0468f0362e2baff27ee7042 2024-12-12T20:41:28,851 INFO [StoreOpener-039d8ca4d0468f0362e2baff27ee7042-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 039d8ca4d0468f0362e2baff27ee7042 2024-12-12T20:41:28,853 INFO [StoreOpener-039d8ca4d0468f0362e2baff27ee7042-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered 
compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 039d8ca4d0468f0362e2baff27ee7042 columnFamilyName l 2024-12-12T20:41:28,853 DEBUG [StoreOpener-039d8ca4d0468f0362e2baff27ee7042-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:41:28,865 INFO [StoreFileOpener-039d8ca4d0468f0362e2baff27ee7042-l-1 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for cce449b2c5a444c999ad9ea0fe2d9421 2024-12-12T20:41:28,865 DEBUG [StoreOpener-039d8ca4d0468f0362e2baff27ee7042-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/acl/039d8ca4d0468f0362e2baff27ee7042/l/cce449b2c5a444c999ad9ea0fe2d9421 2024-12-12T20:41:28,866 INFO [StoreOpener-039d8ca4d0468f0362e2baff27ee7042-1 {}] regionserver.HStore(327): Store=039d8ca4d0468f0362e2baff27ee7042/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:41:28,867 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=193}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/acl/039d8ca4d0468f0362e2baff27ee7042 2024-12-12T20:41:28,868 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=193}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/acl/039d8ca4d0468f0362e2baff27ee7042 2024-12-12T20:41:28,873 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=193}] regionserver.HRegion(1085): writing seq id for 039d8ca4d0468f0362e2baff27ee7042 2024-12-12T20:41:28,874 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=193}] regionserver.HRegion(1102): Opened 039d8ca4d0468f0362e2baff27ee7042; next sequenceid=28; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75254771, jitterRate=0.12138347327709198}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:41:28,878 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=193}] access.PermissionStorage(611): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-12T20:41:28,879 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=193}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-12T20:41:29,035 INFO [regionserver/0a737713f6e3:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(2070): MemstoreFlusherChore requesting flush of hbase:namespace,,1734035787822.419ba590b58c1bffeff3e689922ee9a3. 
because 419ba590b58c1bffeff3e689922ee9a3/info has an old edit so flush to free WALs after random delay 119380 ms 2024-12-12T20:41:29,035 INFO [regionserver/0a737713f6e3:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(2070): MemstoreFlusherChore requesting flush of hbase:meta,,1.1588230740 because 1588230740/info has an old edit so flush to free WALs after random delay 221581 ms 2024-12-12T20:41:29,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportExpiredSnapshot 2024-12-12T20:41:29,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportExpiredSnapshot 2024-12-12T20:41:29,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportExpiredSnapshot 2024-12-12T20:41:29,227 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportExpiredSnapshot 2024-12-12T20:41:29,228 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:41:29,228 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:41:29,228 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:41:29,428 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:41:29,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-12T20:41:29,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-12T20:41:29,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-12T20:41:29,565 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-12T20:41:29,566 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=193}] regionserver.HRegion(1001): Region open journal for 039d8ca4d0468f0362e2baff27ee7042: 2024-12-12T20:41:29,567 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-12T20:41:29,574 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-12T20:41:29,575 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-12T20:41:29,575 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=193}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042., pid=193, masterSystemTime=1734036088797 2024-12-12T20:41:29,576 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-12T20:41:29,578 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=193}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042. 2024-12-12T20:41:29,578 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=193}] handler.AssignRegionHandler(164): Opened hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042. 
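The PermissionStorage(611) "Read acl" entries and the repeated ZKPermissionWatcher updates show how the per-table permissions (for example [jenkins: RWXCA]) are reloaded when the hbase:acl region reopens and then broadcast to every region server through the /hbase/acl znodes seen in the ZooKeeper events. For illustration only (the test's actual grant calls are not part of this excerpt), such an entry is typically created with AccessControlClient, roughly:

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.security.access.AccessControlClient;
  import org.apache.hadoop.hbase.security.access.Permission;

  public class GrantSketch {
    public static void main(String[] args) throws Throwable {
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
        // Grants READ/WRITE/EXEC/CREATE/ADMIN ("RWXCA") on the table to user "jenkins";
        // the grant is persisted in hbase:acl and pushed out via the /hbase/acl znode.
        AccessControlClient.grant(conn, TableName.valueOf("testtb-testExportWithChecksum"),
            "jenkins", null, null,
            Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
            Permission.Action.CREATE, Permission.Action.ADMIN);
      }
    }
  }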
2024-12-12T20:41:29,580 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=191 updating hbase:meta row=039d8ca4d0468f0362e2baff27ee7042, regionState=OPEN, openSeqNum=28, regionLocation=0a737713f6e3,36817,1734035782356 2024-12-12T20:41:29,587 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=193, resume processing ppid=191 2024-12-12T20:41:29,587 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=193, ppid=191, state=SUCCESS; OpenRegionProcedure 039d8ca4d0468f0362e2baff27ee7042, server=0a737713f6e3,36817,1734035782356 in 938 msec 2024-12-12T20:41:29,588 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=191, state=SUCCESS; TransitRegionStateProcedure table=hbase:acl, region=039d8ca4d0468f0362e2baff27ee7042, REOPEN/MOVE in 1.5350 sec 2024-12-12T20:41:29,667 DEBUG [master/0a737713f6e3:0.Chore.1 {}] master.HMaster(2144): Balancer is going into sleep until next period in 300000ms 2024-12-12T20:41:29,829 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T20:41:29,829 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T20:41:29,845 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0008_000001 (auth:SIMPLE) from 127.0.0.1:42550 2024-12-12T20:41:29,855 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_2/usercache/jenkins/appcache/application_1734035798001_0008/container_1734035798001_0008_01_000001/launch_container.sh] 2024-12-12T20:41:29,855 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_2/usercache/jenkins/appcache/application_1734035798001_0008/container_1734035798001_0008_01_000001/container_tokens] 2024-12-12T20:41:29,855 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_2/usercache/jenkins/appcache/application_1734035798001_0008/container_1734035798001_0008_01_000001/sysfs] 2024-12-12T20:41:30,220 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0009_000001 (auth:SIMPLE) from 127.0.0.1:39168 2024-12-12T20:41:32,008 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 
'hbase:acl' 2024-12-12T20:41:32,810 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T20:41:34,594 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 376feae6d5935ea89b8dc97bd6e805f8, had cached 0 bytes from a total of 8120 2024-12-12T20:41:34,595 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 08f57703150ab9ef20f2776475b12ad2, had cached 0 bytes from a total of 5490 2024-12-12T20:41:39,360 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0009_000001 (auth:SIMPLE) from 127.0.0.1:49090 2024-12-12T20:41:39,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742320_1496 (size=350557) 2024-12-12T20:41:39,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742320_1496 (size=350557) 2024-12-12T20:41:39,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742320_1496 (size=350557) 2024-12-12T20:41:41,620 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0009_000001 (auth:SIMPLE) from 127.0.0.1:41062 2024-12-12T20:41:45,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742321_1497 (size=8120) 2024-12-12T20:41:45,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742321_1497 (size=8120) 2024-12-12T20:41:45,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742321_1497 (size=8120) 2024-12-12T20:41:45,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742322_1498 (size=5490) 2024-12-12T20:41:45,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742322_1498 (size=5490) 2024-12-12T20:41:45,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742322_1498 (size=5490) 2024-12-12T20:41:45,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742323_1499 (size=17413) 2024-12-12T20:41:45,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742323_1499 (size=17413) 2024-12-12T20:41:45,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742323_1499 (size=17413) 2024-12-12T20:41:45,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742324_1500 (size=462) 2024-12-12T20:41:45,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742324_1500 (size=462) 2024-12-12T20:41:45,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:32781 is added to blk_1073742324_1500 (size=462) 2024-12-12T20:41:45,430 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_2/usercache/jenkins/appcache/application_1734035798001_0009/container_1734035798001_0009_01_000002/launch_container.sh] 2024-12-12T20:41:45,430 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_2/usercache/jenkins/appcache/application_1734035798001_0009/container_1734035798001_0009_01_000002/container_tokens] 2024-12-12T20:41:45,430 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_2/usercache/jenkins/appcache/application_1734035798001_0009/container_1734035798001_0009_01_000002/sysfs] 2024-12-12T20:41:45,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742325_1501 (size=17413) 2024-12-12T20:41:45,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742325_1501 (size=17413) 2024-12-12T20:41:45,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742325_1501 (size=17413) 2024-12-12T20:41:45,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742326_1502 (size=350557) 2024-12-12T20:41:45,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742326_1502 (size=350557) 2024-12-12T20:41:45,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742326_1502 (size=350557) 2024-12-12T20:41:46,844 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-12T20:41:46,846 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
2024-12-12T20:41:46,862 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportWithChecksum 2024-12-12T20:41:46,862 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-12T20:41:46,862 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-12T20:41:46,863 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1165987477_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-12T20:41:46,863 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-12T20:41:46,863 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-12T20:41:46,863 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1165987477_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036083070/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036083070/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-12T20:41:46,864 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036083070/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-12T20:41:46,864 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036083070/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-12T20:41:46,913 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithChecksum 2024-12-12T20:41:46,915 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-12-12T20:41:46,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=194, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithChecksum 2024-12-12T20:41:46,922 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734036106921"}]},"ts":"1734036106921"} 2024-12-12T20:41:46,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=194 2024-12-12T20:41:46,927 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-12-12T20:41:46,965 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set 
testtb-testExportWithChecksum to state=DISABLING 2024-12-12T20:41:46,966 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=195, ppid=194, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-12-12T20:41:46,969 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=08f57703150ab9ef20f2776475b12ad2, UNASSIGN}, {pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=376feae6d5935ea89b8dc97bd6e805f8, UNASSIGN}] 2024-12-12T20:41:46,970 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=376feae6d5935ea89b8dc97bd6e805f8, UNASSIGN 2024-12-12T20:41:46,970 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=08f57703150ab9ef20f2776475b12ad2, UNASSIGN 2024-12-12T20:41:46,974 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=197 updating hbase:meta row=376feae6d5935ea89b8dc97bd6e805f8, regionState=CLOSING, regionLocation=0a737713f6e3,42407,1734035781742 2024-12-12T20:41:46,974 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=196 updating hbase:meta row=08f57703150ab9ef20f2776475b12ad2, regionState=CLOSING, regionLocation=0a737713f6e3,46859,1734035782031 2024-12-12T20:41:46,977 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T20:41:46,977 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=198, ppid=197, state=RUNNABLE; CloseRegionProcedure 376feae6d5935ea89b8dc97bd6e805f8, server=0a737713f6e3,42407,1734035781742}] 2024-12-12T20:41:46,978 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T20:41:46,978 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=199, ppid=196, state=RUNNABLE; CloseRegionProcedure 08f57703150ab9ef20f2776475b12ad2, server=0a737713f6e3,46859,1734035782031}] 2024-12-12T20:41:47,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=194 2024-12-12T20:41:47,129 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:41:47,130 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] handler.UnassignRegionHandler(124): Close 376feae6d5935ea89b8dc97bd6e805f8 2024-12-12T20:41:47,130 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T20:41:47,130 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.HRegion(1681): Closing 376feae6d5935ea89b8dc97bd6e805f8, disabling compactions & flushes 2024-12-12T20:41:47,130 INFO 
[RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8. 2024-12-12T20:41:47,130 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8. 2024-12-12T20:41:47,130 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8. after waiting 0 ms 2024-12-12T20:41:47,130 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8. 2024-12-12T20:41:47,131 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,46859,1734035782031 2024-12-12T20:41:47,132 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=199}] handler.UnassignRegionHandler(124): Close 08f57703150ab9ef20f2776475b12ad2 2024-12-12T20:41:47,132 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=199}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T20:41:47,132 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=199}] regionserver.HRegion(1681): Closing 08f57703150ab9ef20f2776475b12ad2, disabling compactions & flushes 2024-12-12T20:41:47,132 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=199}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2. 2024-12-12T20:41:47,132 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=199}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2. 2024-12-12T20:41:47,132 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=199}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2. after waiting 0 ms 2024-12-12T20:41:47,132 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=199}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2. 
2024-12-12T20:41:47,134 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/376feae6d5935ea89b8dc97bd6e805f8/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T20:41:47,134 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:41:47,134 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8. 2024-12-12T20:41:47,134 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.HRegion(1635): Region close journal for 376feae6d5935ea89b8dc97bd6e805f8: 2024-12-12T20:41:47,135 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=199}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/08f57703150ab9ef20f2776475b12ad2/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T20:41:47,135 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=199}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:41:47,135 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=199}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2. 
2024-12-12T20:41:47,136 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=199}] regionserver.HRegion(1635): Region close journal for 08f57703150ab9ef20f2776475b12ad2: 2024-12-12T20:41:47,136 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] handler.UnassignRegionHandler(170): Closed 376feae6d5935ea89b8dc97bd6e805f8 2024-12-12T20:41:47,137 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=197 updating hbase:meta row=376feae6d5935ea89b8dc97bd6e805f8, regionState=CLOSED 2024-12-12T20:41:47,137 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=199}] handler.UnassignRegionHandler(170): Closed 08f57703150ab9ef20f2776475b12ad2 2024-12-12T20:41:47,137 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=196 updating hbase:meta row=08f57703150ab9ef20f2776475b12ad2, regionState=CLOSED 2024-12-12T20:41:47,139 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=198, resume processing ppid=197 2024-12-12T20:41:47,139 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=198, ppid=197, state=SUCCESS; CloseRegionProcedure 376feae6d5935ea89b8dc97bd6e805f8, server=0a737713f6e3,42407,1734035781742 in 161 msec 2024-12-12T20:41:47,140 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=199, resume processing ppid=196 2024-12-12T20:41:47,140 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=199, ppid=196, state=SUCCESS; CloseRegionProcedure 08f57703150ab9ef20f2776475b12ad2, server=0a737713f6e3,46859,1734035782031 in 160 msec 2024-12-12T20:41:47,141 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=196, ppid=195, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=08f57703150ab9ef20f2776475b12ad2, UNASSIGN in 171 msec 2024-12-12T20:41:47,142 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=197, resume processing ppid=195 2024-12-12T20:41:47,142 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=197, ppid=195, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=376feae6d5935ea89b8dc97bd6e805f8, UNASSIGN in 172 msec 2024-12-12T20:41:47,151 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=195, resume processing ppid=194 2024-12-12T20:41:47,151 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=195, ppid=194, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 176 msec 2024-12-12T20:41:47,152 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734036107152"}]},"ts":"1734036107152"} 2024-12-12T20:41:47,154 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-12-12T20:41:47,162 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-12-12T20:41:47,164 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=194, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithChecksum in 248 msec 2024-12-12T20:41:47,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=194 2024-12-12T20:41:47,228 
INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum, procId: 194 completed 2024-12-12T20:41:47,229 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-12-12T20:41:47,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=200, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-12T20:41:47,235 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=200, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-12T20:41:47,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithChecksum 2024-12-12T20:41:47,237 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46859 {}] ipc.CallRunner(138): callId: 344 service: ClientService methodName: Mutate size: 159 connection: 172.17.0.2:50246 deadline: 1734036167237, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=0a737713f6e3 port=36817 startCode=1734035782356. As of locationSeqNum=24. 2024-12-12T20:41:47,242 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=200, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-12T20:41:47,247 DEBUG [HFileArchiver-37 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/08f57703150ab9ef20f2776475b12ad2 2024-12-12T20:41:47,247 DEBUG [HFileArchiver-38 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/376feae6d5935ea89b8dc97bd6e805f8 2024-12-12T20:41:47,250 DEBUG [HFileArchiver-37 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/08f57703150ab9ef20f2776475b12ad2/cf, FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/08f57703150ab9ef20f2776475b12ad2/recovered.edits] 2024-12-12T20:41:47,250 DEBUG [HFileArchiver-38 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/376feae6d5935ea89b8dc97bd6e805f8/cf, FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/376feae6d5935ea89b8dc97bd6e805f8/recovered.edits] 2024-12-12T20:41:47,256 DEBUG [HFileArchiver-40 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/08f57703150ab9ef20f2776475b12ad2/cf/59c9d271eb3d49f9ade34794a3fc645f to 
hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportWithChecksum/08f57703150ab9ef20f2776475b12ad2/cf/59c9d271eb3d49f9ade34794a3fc645f 2024-12-12T20:41:47,256 DEBUG [HFileArchiver-39 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/376feae6d5935ea89b8dc97bd6e805f8/cf/13997210ed5a404ebb63ba0ce5f8491b to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportWithChecksum/376feae6d5935ea89b8dc97bd6e805f8/cf/13997210ed5a404ebb63ba0ce5f8491b 2024-12-12T20:41:47,265 DEBUG [HFileArchiver-41 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/08f57703150ab9ef20f2776475b12ad2/recovered.edits/9.seqid to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportWithChecksum/08f57703150ab9ef20f2776475b12ad2/recovered.edits/9.seqid 2024-12-12T20:41:47,267 DEBUG [HFileArchiver-37 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/08f57703150ab9ef20f2776475b12ad2 2024-12-12T20:41:47,269 DEBUG [HFileArchiver-42 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/376feae6d5935ea89b8dc97bd6e805f8/recovered.edits/9.seqid to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportWithChecksum/376feae6d5935ea89b8dc97bd6e805f8/recovered.edits/9.seqid 2024-12-12T20:41:47,269 DEBUG [HFileArchiver-38 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportWithChecksum/376feae6d5935ea89b8dc97bd6e805f8 2024-12-12T20:41:47,269 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-12-12T20:41:47,278 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=200, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-12T20:41:47,281 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-12-12T20:41:47,283 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithChecksum' descriptor. 2024-12-12T20:41:47,285 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=200, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-12T20:41:47,285 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithChecksum' from region states. 
2024-12-12T20:41:47,285 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734036107285"}]},"ts":"9223372036854775807"} 2024-12-12T20:41:47,285 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734036107285"}]},"ts":"9223372036854775807"} 2024-12-12T20:41:47,287 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-12T20:41:47,287 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 08f57703150ab9ef20f2776475b12ad2, NAME => 'testtb-testExportWithChecksum,,1734036049213.08f57703150ab9ef20f2776475b12ad2.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 376feae6d5935ea89b8dc97bd6e805f8, NAME => 'testtb-testExportWithChecksum,1,1734036049213.376feae6d5935ea89b8dc97bd6e805f8.', STARTKEY => '1', ENDKEY => ''}] 2024-12-12T20:41:47,287 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithChecksum' as deleted. 2024-12-12T20:41:47,287 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734036107287"}]},"ts":"9223372036854775807"} 2024-12-12T20:41:47,293 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithChecksum state from META 2024-12-12T20:41:47,304 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=200, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-12T20:41:47,305 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=200, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithChecksum in 76 msec 2024-12-12T20:41:47,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:41:47,345 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51536, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:41:47,348 DEBUG [hconnection-0x7d2c1b59-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:41:47,349 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40264, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-12-12T20:41:47,354 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36817 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-12-12T20:41:47,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-12T20:41:47,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, 
state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-12T20:41:47,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-12T20:41:47,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-12T20:41:47,374 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-12T20:41:47,374 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-12T20:41:47,375 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-12T20:41:47,375 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-12T20:41:47,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-12T20:41:47,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-12T20:41:47,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:41:47,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-12T20:41:47,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-12T20:41:47,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:41:47,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:41:47,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:41:47,383 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=200 2024-12-12T20:41:47,383 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:41:47,383 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum, procId: 200 completed 2024-12-12T20:41:47,384 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:41:47,384 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:41:47,384 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:41:47,389 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" 2024-12-12T20:41:47,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-12-12T20:41:47,393 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" 2024-12-12T20:41:47,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithChecksum 2024-12-12T20:41:47,424 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=813 (was 809) Potentially hanging thread: HFileArchiver-42 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-37 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-41 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1165987477_22 at /127.0.0.1:48466 [Waiting for operation #3] 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-49 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-51 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-50 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-39 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-12963069_1 at /127.0.0.1:48446 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-6631 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1165987477_22 at /127.0.0.1:54538 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-40 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874193583) connection to localhost/127.0.0.1:41791 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41791 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1165987477_22 at /127.0.0.1:48752 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-38 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 76310) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-12963069_1 at /127.0.0.1:54500 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2928eebe-shared-pool-48 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x7d2c1b59-metaLookup-shared--pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=817 (was 808) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1232 (was 1348), ProcessCount=17 (was 15) - ProcessCount LEAK? 
-, AvailableMemoryMB=1589 (was 1933) 2024-12-12T20:41:47,425 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=813 is superior to 500 2024-12-12T20:41:47,450 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=813, OpenFileDescriptor=817, MaxFileDescriptor=1048576, SystemLoadAverage=1232, ProcessCount=17, AvailableMemoryMB=1591 2024-12-12T20:41:47,450 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=813 is superior to 500 2024-12-12T20:41:47,452 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T20:41:47,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=201, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T20:41:47,453 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=201, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T20:41:47,454 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:41:47,454 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 201 2024-12-12T20:41:47,454 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=201, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T20:41:47,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=201 2024-12-12T20:41:47,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742327_1503 (size=418) 2024-12-12T20:41:47,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742327_1503 (size=418) 2024-12-12T20:41:47,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742327_1503 (size=418) 2024-12-12T20:41:47,487 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d78bfcfd6b10390d29a02e632b308ca0, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 
'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:41:47,487 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 67760c03f7f8f9e3deb21b2777ea6a3e, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:41:47,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742329_1505 (size=79) 2024-12-12T20:41:47,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742329_1505 (size=79) 2024-12-12T20:41:47,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742329_1505 (size=79) 2024-12-12T20:41:47,503 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:41:47,503 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1681): Closing d78bfcfd6b10390d29a02e632b308ca0, disabling compactions & flushes 2024-12-12T20:41:47,503 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0. 2024-12-12T20:41:47,503 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0. 2024-12-12T20:41:47,503 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0. after waiting 0 ms 2024-12-12T20:41:47,504 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0. 
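The create request logged above for 'testtb-testExportFileSystemStateWithSkipTmp' (a single column family 'cf' with VERSIONS => '1', pre-split so that the two regions meet at key '1') corresponds to one Admin call on the client side. The following is a minimal sketch, assuming the standard HBase 2.x Java client API; the class name and the connection configuration are illustrative and not taken from the test itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateSkipTmpTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
      TableDescriptor td = TableDescriptorBuilder.newBuilder(tn)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)             // VERSIONS => '1' in the logged descriptor
              .build())
          .build();
      // One split key '1' yields the two regions seen in the RegionOpenAndInit lines:
      // (STARTKEY '' .. ENDKEY '1') and (STARTKEY '1' .. ENDKEY '').
      admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
    }
  }
}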
2024-12-12T20:41:47,504 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0. 2024-12-12T20:41:47,504 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1635): Region close journal for d78bfcfd6b10390d29a02e632b308ca0: 2024-12-12T20:41:47,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742328_1504 (size=79) 2024-12-12T20:41:47,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742328_1504 (size=79) 2024-12-12T20:41:47,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742328_1504 (size=79) 2024-12-12T20:41:47,509 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:41:47,509 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1681): Closing 67760c03f7f8f9e3deb21b2777ea6a3e, disabling compactions & flushes 2024-12-12T20:41:47,509 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e. 2024-12-12T20:41:47,509 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e. 2024-12-12T20:41:47,509 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e. after waiting 0 ms 2024-12-12T20:41:47,509 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e. 2024-12-12T20:41:47,509 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e. 
2024-12-12T20:41:47,509 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1635): Region close journal for 67760c03f7f8f9e3deb21b2777ea6a3e: 2024-12-12T20:41:47,511 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=201, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T20:41:47,513 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1734036107511"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734036107511"}]},"ts":"1734036107511"} 2024-12-12T20:41:47,513 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1734036107511"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734036107511"}]},"ts":"1734036107511"} 2024-12-12T20:41:47,517 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-12T20:41:47,520 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=201, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T20:41:47,520 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734036107520"}]},"ts":"1734036107520"} 2024-12-12T20:41:47,523 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-12-12T20:41:47,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=201 2024-12-12T20:41:47,729 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {0a737713f6e3=0} racks are {/default-rack=0} 2024-12-12T20:41:47,732 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-12T20:41:47,732 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-12T20:41:47,732 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-12T20:41:47,732 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-12T20:41:47,732 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-12T20:41:47,732 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-12T20:41:47,732 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T20:41:47,733 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=202, ppid=201, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d78bfcfd6b10390d29a02e632b308ca0, ASSIGN}, {pid=203, ppid=201, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, 
region=67760c03f7f8f9e3deb21b2777ea6a3e, ASSIGN}] 2024-12-12T20:41:47,736 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=203, ppid=201, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=67760c03f7f8f9e3deb21b2777ea6a3e, ASSIGN 2024-12-12T20:41:47,736 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=202, ppid=201, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d78bfcfd6b10390d29a02e632b308ca0, ASSIGN 2024-12-12T20:41:47,738 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=203, ppid=201, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=67760c03f7f8f9e3deb21b2777ea6a3e, ASSIGN; state=OFFLINE, location=0a737713f6e3,42407,1734035781742; forceNewPlan=false, retain=false 2024-12-12T20:41:47,738 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=202, ppid=201, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d78bfcfd6b10390d29a02e632b308ca0, ASSIGN; state=OFFLINE, location=0a737713f6e3,36817,1734035782356; forceNewPlan=false, retain=false 2024-12-12T20:41:47,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=201 2024-12-12T20:41:47,888 INFO [0a737713f6e3:43067 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-12T20:41:47,889 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=202 updating hbase:meta row=d78bfcfd6b10390d29a02e632b308ca0, regionState=OPENING, regionLocation=0a737713f6e3,36817,1734035782356 2024-12-12T20:41:47,889 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=203 updating hbase:meta row=67760c03f7f8f9e3deb21b2777ea6a3e, regionState=OPENING, regionLocation=0a737713f6e3,42407,1734035781742 2024-12-12T20:41:47,894 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE; OpenRegionProcedure 67760c03f7f8f9e3deb21b2777ea6a3e, server=0a737713f6e3,42407,1734035781742}] 2024-12-12T20:41:47,900 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=205, ppid=202, state=RUNNABLE; OpenRegionProcedure d78bfcfd6b10390d29a02e632b308ca0, server=0a737713f6e3,36817,1734035782356}] 2024-12-12T20:41:48,054 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:41:48,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=201 2024-12-12T20:41:48,060 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=204}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e. 
2024-12-12T20:41:48,060 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=204}] regionserver.HRegion(7285): Opening region: {ENCODED => 67760c03f7f8f9e3deb21b2777ea6a3e, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e.', STARTKEY => '1', ENDKEY => ''} 2024-12-12T20:41:48,060 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=204}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e. service=AccessControlService 2024-12-12T20:41:48,061 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=204}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T20:41:48,061 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=204}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 67760c03f7f8f9e3deb21b2777ea6a3e 2024-12-12T20:41:48,061 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=204}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:41:48,061 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=204}] regionserver.HRegion(7327): checking encryption for 67760c03f7f8f9e3deb21b2777ea6a3e 2024-12-12T20:41:48,061 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=204}] regionserver.HRegion(7330): checking classloading for 67760c03f7f8f9e3deb21b2777ea6a3e 2024-12-12T20:41:48,062 INFO [StoreOpener-67760c03f7f8f9e3deb21b2777ea6a3e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 67760c03f7f8f9e3deb21b2777ea6a3e 2024-12-12T20:41:48,064 INFO [StoreOpener-67760c03f7f8f9e3deb21b2777ea6a3e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 67760c03f7f8f9e3deb21b2777ea6a3e columnFamilyName cf 2024-12-12T20:41:48,064 DEBUG [StoreOpener-67760c03f7f8f9e3deb21b2777ea6a3e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:41:48,064 INFO [StoreOpener-67760c03f7f8f9e3deb21b2777ea6a3e-1 {}] regionserver.HStore(327): Store=67760c03f7f8f9e3deb21b2777ea6a3e/cf, memstore type=DefaultMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:41:48,065 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=204}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/67760c03f7f8f9e3deb21b2777ea6a3e 2024-12-12T20:41:48,065 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=204}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/67760c03f7f8f9e3deb21b2777ea6a3e 2024-12-12T20:41:48,067 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:41:48,067 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=204}] regionserver.HRegion(1085): writing seq id for 67760c03f7f8f9e3deb21b2777ea6a3e 2024-12-12T20:41:48,076 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=204}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/67760c03f7f8f9e3deb21b2777ea6a3e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:41:48,090 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=204}] regionserver.HRegion(1102): Opened 67760c03f7f8f9e3deb21b2777ea6a3e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60885778, jitterRate=-0.09273120760917664}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:41:48,091 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=204}] regionserver.HRegion(1001): Region open journal for 67760c03f7f8f9e3deb21b2777ea6a3e: 2024-12-12T20:41:48,094 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=205}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0. 2024-12-12T20:41:48,094 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=205}] regionserver.HRegion(7285): Opening region: {ENCODED => d78bfcfd6b10390d29a02e632b308ca0, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0.', STARTKEY => '', ENDKEY => '1'} 2024-12-12T20:41:48,094 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=205}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0. service=AccessControlService 2024-12-12T20:41:48,095 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=205}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
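For reference, the column family 'cf' whose store is being opened above was created with VERSIONS => '1', BLOOMFILTER => 'ROW' and BLOCKSIZE => '65536'; once the table exists those settings can be read back through the Admin API. A minimal sketch, assuming the HBase 2.x client; the class name and the already-open 'admin' handle are illustrative.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public class InspectTableDescriptor {
  static void printFamilyConfig(Admin admin) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    TableDescriptor td = admin.getDescriptor(tn);
    ColumnFamilyDescriptor cf = td.getColumnFamily(Bytes.toBytes("cf"));
    System.out.println("versions=" + cf.getMaxVersions());    // 1
    System.out.println("bloom=" + cf.getBloomFilterType());   // ROW
    System.out.println("blocksize=" + cf.getBlocksize());     // 65536
  }
}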
2024-12-12T20:41:48,095 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=205}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp d78bfcfd6b10390d29a02e632b308ca0 2024-12-12T20:41:48,095 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=205}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T20:41:48,095 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=205}] regionserver.HRegion(7327): checking encryption for d78bfcfd6b10390d29a02e632b308ca0 2024-12-12T20:41:48,095 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=205}] regionserver.HRegion(7330): checking classloading for d78bfcfd6b10390d29a02e632b308ca0 2024-12-12T20:41:48,099 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=204}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e., pid=204, masterSystemTime=1734036108054 2024-12-12T20:41:48,103 INFO [StoreOpener-d78bfcfd6b10390d29a02e632b308ca0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d78bfcfd6b10390d29a02e632b308ca0 2024-12-12T20:41:48,105 INFO [StoreOpener-d78bfcfd6b10390d29a02e632b308ca0-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d78bfcfd6b10390d29a02e632b308ca0 columnFamilyName cf 2024-12-12T20:41:48,105 DEBUG [StoreOpener-d78bfcfd6b10390d29a02e632b308ca0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T20:41:48,105 INFO [StoreOpener-d78bfcfd6b10390d29a02e632b308ca0-1 {}] regionserver.HStore(327): Store=d78bfcfd6b10390d29a02e632b308ca0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T20:41:48,106 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=205}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/d78bfcfd6b10390d29a02e632b308ca0 2024-12-12T20:41:48,107 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=205}] 
regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/d78bfcfd6b10390d29a02e632b308ca0 2024-12-12T20:41:48,108 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=204}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e. 2024-12-12T20:41:48,108 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=204}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e. 2024-12-12T20:41:48,108 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=203 updating hbase:meta row=67760c03f7f8f9e3deb21b2777ea6a3e, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,42407,1734035781742 2024-12-12T20:41:48,109 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=205}] regionserver.HRegion(1085): writing seq id for d78bfcfd6b10390d29a02e632b308ca0 2024-12-12T20:41:48,112 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=204, resume processing ppid=203 2024-12-12T20:41:48,112 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=204, ppid=203, state=SUCCESS; OpenRegionProcedure 67760c03f7f8f9e3deb21b2777ea6a3e, server=0a737713f6e3,42407,1734035781742 in 216 msec 2024-12-12T20:41:48,113 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=203, ppid=201, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=67760c03f7f8f9e3deb21b2777ea6a3e, ASSIGN in 379 msec 2024-12-12T20:41:48,114 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=205}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/d78bfcfd6b10390d29a02e632b308ca0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T20:41:48,115 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=205}] regionserver.HRegion(1102): Opened d78bfcfd6b10390d29a02e632b308ca0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72993828, jitterRate=0.08769279718399048}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T20:41:48,115 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=205}] regionserver.HRegion(1001): Region open journal for d78bfcfd6b10390d29a02e632b308ca0: 2024-12-12T20:41:48,116 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=205}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0., pid=205, masterSystemTime=1734036108067 2024-12-12T20:41:48,117 DEBUG [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=205}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0. 
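At this point both regions have been opened, 67760c03f7f8f9e3deb21b2777ea6a3e on 0a737713f6e3,42407,1734035781742 and d78bfcfd6b10390d29a02e632b308ca0 on 0a737713f6e3,36817,1734035782356 (the assignment bookkeeping for the latter completes just below). A client can observe the same placement through the region locator; a minimal sketch, assuming the HBase 2.x client API and an already-open Connection named 'conn' (illustrative).

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowRegionLocations {
  static void printLocations(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (RegionLocator locator = conn.getRegionLocator(tn)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // Prints the encoded region name and its hosting server,
        // e.g. 67760c03f7f8f9e3deb21b2777ea6a3e -> 0a737713f6e3,42407,1734035781742
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}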
2024-12-12T20:41:48,117 INFO [RS_OPEN_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_OPEN_REGION, pid=205}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0. 2024-12-12T20:41:48,117 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=202 updating hbase:meta row=d78bfcfd6b10390d29a02e632b308ca0, regionState=OPEN, openSeqNum=2, regionLocation=0a737713f6e3,36817,1734035782356 2024-12-12T20:41:48,120 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=205, resume processing ppid=202 2024-12-12T20:41:48,120 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=205, ppid=202, state=SUCCESS; OpenRegionProcedure d78bfcfd6b10390d29a02e632b308ca0, server=0a737713f6e3,36817,1734035782356 in 219 msec 2024-12-12T20:41:48,121 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=202, resume processing ppid=201 2024-12-12T20:41:48,121 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=202, ppid=201, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d78bfcfd6b10390d29a02e632b308ca0, ASSIGN in 387 msec 2024-12-12T20:41:48,122 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=201, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T20:41:48,122 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734036108122"}]},"ts":"1734036108122"} 2024-12-12T20:41:48,123 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-12-12T20:41:48,146 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=201, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T20:41:48,146 DEBUG [PEWorker-4 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-12-12T20:41:48,148 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36817 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-12T20:41:48,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:41:48,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:41:48,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:41:48,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:41:48,166 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:41:48,166 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:41:48,166 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-12T20:41:48,166 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-12T20:41:48,167 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:41:48,167 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-12T20:41:48,167 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:41:48,167 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-12T20:41:48,169 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=201, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 714 msec 2024-12-12T20:41:48,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=201 2024-12-12T20:41:48,559 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 201 completed 2024-12-12T20:41:48,559 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportFileSystemStateWithSkipTmp get assigned. 
Timeout = 60000ms 2024-12-12T20:41:48,559 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:41:48,563 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46859 {}] regionserver.StoreScanner(1133): Switch to stream read (scanned=32836 bytes) of info 2024-12-12T20:41:48,570 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned to meta. Checking AM states. 2024-12-12T20:41:48,570 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:41:48,570 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned. 2024-12-12T20:41:48,573 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-12T20:41:48,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734036108573 (current time:1734036108573). 2024-12-12T20:41:48,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T20:41:48,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-12T20:41:48,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-12T20:41:48,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x385eee3f to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1d6d2899 2024-12-12T20:41:48,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22eefe2c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:41:48,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:41:48,619 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40276, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:41:48,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x385eee3f to 127.0.0.1:52655 2024-12-12T20:41:48,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:41:48,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x76ff0354 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client 
config=org.apache.zookeeper.client.ZKClientConfig@2a2d4f2 2024-12-12T20:41:48,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57684514, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:41:48,646 DEBUG [hconnection-0x17dde7ea-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:41:48,647 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40284, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:41:48,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:41:48,650 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51552, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:41:48,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x76ff0354 to 127.0.0.1:52655 2024-12-12T20:41:48,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:41:48,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-12T20:41:48,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
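The handler above has finished its permission check ("Read acl: ... jenkins: RWXCA") and found no existing snapshot, so the master is about to store a SnapshotProcedure for emptySnaptb0-testExportFileSystemStateWithSkipTmp. On the client side this whole exchange is a single Admin call; a minimal sketch, assuming the HBase 2.x Admin API, with the 'admin' handle as in the earlier create-table sketch.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class TakeEmptySnapshot {
  static void takeSnapshot(Admin admin) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    // Requests the snapshot the master registers below as
    // { ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp ... type=FLUSH ttl=0 }.
    admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp", tn);
  }
}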
2024-12-12T20:41:48,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=206, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-12T20:41:48,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 206 2024-12-12T20:41:48,655 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T20:41:48,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-12T20:41:48,656 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T20:41:48,660 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T20:41:48,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742330_1506 (size=203) 2024-12-12T20:41:48,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742330_1506 (size=203) 2024-12-12T20:41:48,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742330_1506 (size=203) 2024-12-12T20:41:48,715 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T20:41:48,716 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure d78bfcfd6b10390d29a02e632b308ca0}, {pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 67760c03f7f8f9e3deb21b2777ea6a3e}] 2024-12-12T20:41:48,720 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure d78bfcfd6b10390d29a02e632b308ca0 2024-12-12T20:41:48,720 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 
67760c03f7f8f9e3deb21b2777ea6a3e 2024-12-12T20:41:48,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-12T20:41:48,872 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:41:48,873 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:41:48,873 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36817 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=207 2024-12-12T20:41:48,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0. 2024-12-12T20:41:48,874 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42407 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=208 2024-12-12T20:41:48,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e. 2024-12-12T20:41:48,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(2538): Flush status journal for d78bfcfd6b10390d29a02e632b308ca0: 2024-12-12T20:41:48,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-12T20:41:48,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(2538): Flush status journal for 67760c03f7f8f9e3deb21b2777ea6a3e: 2024-12-12T20:41:48,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-12T20:41:48,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T20:41:48,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T20:41:48,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:41:48,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T20:41:48,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:41:48,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T20:41:48,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742331_1507 (size=82) 2024-12-12T20:41:48,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742331_1507 (size=82) 2024-12-12T20:41:48,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742331_1507 (size=82) 2024-12-12T20:41:48,925 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e. 2024-12-12T20:41:48,925 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=208 2024-12-12T20:41:48,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=208 2024-12-12T20:41:48,926 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 67760c03f7f8f9e3deb21b2777ea6a3e 2024-12-12T20:41:48,926 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 67760c03f7f8f9e3deb21b2777ea6a3e 2024-12-12T20:41:48,948 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=208, ppid=206, state=SUCCESS; SnapshotRegionProcedure 67760c03f7f8f9e3deb21b2777ea6a3e in 225 msec 2024-12-12T20:41:48,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-12T20:41:48,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742332_1508 (size=82) 2024-12-12T20:41:48,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742332_1508 (size=82) 2024-12-12T20:41:48,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742332_1508 (size=82) 2024-12-12T20:41:49,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-12T20:41:49,301 DEBUG 
[FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T20:41:49,361 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0. 2024-12-12T20:41:49,361 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=207 2024-12-12T20:41:49,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=207 2024-12-12T20:41:49,361 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region d78bfcfd6b10390d29a02e632b308ca0 2024-12-12T20:41:49,361 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure d78bfcfd6b10390d29a02e632b308ca0 2024-12-12T20:41:49,363 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=207, resume processing ppid=206 2024-12-12T20:41:49,363 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T20:41:49,363 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=207, ppid=206, state=SUCCESS; SnapshotRegionProcedure d78bfcfd6b10390d29a02e632b308ca0 in 647 msec 2024-12-12T20:41:49,364 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T20:41:49,364 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T20:41:49,364 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T20:41:49,365 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T20:41:49,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742333_1509 (size=585) 2024-12-12T20:41:49,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is 
added to blk_1073742333_1509 (size=585) 2024-12-12T20:41:49,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742333_1509 (size=585) 2024-12-12T20:41:49,386 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T20:41:49,390 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T20:41:49,391 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T20:41:49,392 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T20:41:49,392 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 206 2024-12-12T20:41:49,393 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=206, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 739 msec 2024-12-12T20:41:49,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-12T20:41:49,762 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 206 completed 2024-12-12T20:41:49,768 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36817 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T20:41:49,769 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42407 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e. with WAL disabled. Data may be lost in the event of a crash. 
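The two HRegion(8254) warnings above are what a regionserver logs when a client writes with the WAL skipped for the mutation. A minimal sketch of such a write, assuming the HBase 2.x client API; the row key, qualifier and value are illustrative, and 'conn' is assumed to be an open Connection.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalWrite {
  static void writeWithoutWal(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Table table = conn.getTable(tn)) {
      Put put = new Put(Bytes.toBytes("row-0"))   // illustrative row key
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SKIP_WAL is what triggers the "Data may be lost in the event of a crash" warning.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}

Skipping the WAL speeds up the load but, as the warning says, unflushed data would be lost if the regionserver crashed before a flush.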
2024-12-12T20:41:49,772 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T20:41:49,772 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0. 2024-12-12T20:41:49,772 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T20:41:49,784 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-12T20:41:49,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734036109784 (current time:1734036109784). 2024-12-12T20:41:49,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T20:41:49,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-12T20:41:49,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-12T20:41:49,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x18f25533 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@60e81370 2024-12-12T20:41:49,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c6da518, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:41:49,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:41:49,827 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40288, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:41:49,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x18f25533 to 127.0.0.1:52655 2024-12-12T20:41:49,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:41:49,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7422d882 to 127.0.0.1:52655 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@547136e5 2024-12-12T20:41:49,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75951c16, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T20:41:49,848 DEBUG [hconnection-0x110fadc7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:41:49,849 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40292, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:41:49,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T20:41:49,852 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52430, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T20:41:49,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7422d882 to 127.0.0.1:52655 2024-12-12T20:41:49,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:41:49,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-12T20:41:49,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-12T20:41:49,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=209, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=209, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-12T20:41:49,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 209 2024-12-12T20:41:49,857 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=209, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=209, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T20:41:49,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-12T20:41:49,858 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=209, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=209, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T20:41:49,860 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=209, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=209, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T20:41:49,865 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742334_1510 (size=198) 2024-12-12T20:41:49,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742334_1510 (size=198) 2024-12-12T20:41:49,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742334_1510 (size=198) 2024-12-12T20:41:49,866 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=209, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=209, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T20:41:49,866 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=210, ppid=209, state=RUNNABLE; SnapshotRegionProcedure d78bfcfd6b10390d29a02e632b308ca0}, {pid=211, ppid=209, state=RUNNABLE; SnapshotRegionProcedure 67760c03f7f8f9e3deb21b2777ea6a3e}] 2024-12-12T20:41:49,867 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=211, ppid=209, state=RUNNABLE; SnapshotRegionProcedure 67760c03f7f8f9e3deb21b2777ea6a3e 2024-12-12T20:41:49,867 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=210, ppid=209, state=RUNNABLE; SnapshotRegionProcedure d78bfcfd6b10390d29a02e632b308ca0 2024-12-12T20:41:49,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-12T20:41:50,009 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-12-12T20:41:50,018 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:41:50,018 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:41:50,018 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36817 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=210 2024-12-12T20:41:50,018 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42407 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=211 2024-12-12T20:41:50,018 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=210}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0. 2024-12-12T20:41:50,018 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=211}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e. 
2024-12-12T20:41:50,019 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=210}] regionserver.HRegion(2837): Flushing d78bfcfd6b10390d29a02e632b308ca0 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-12T20:41:50,019 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=211}] regionserver.HRegion(2837): Flushing 67760c03f7f8f9e3deb21b2777ea6a3e 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-12T20:41:50,038 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=210}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/d78bfcfd6b10390d29a02e632b308ca0/.tmp/cf/f4bb7985481845d1bd664a0f5d732c02 is 71, key is 05d827cffd4836ad209dbffe318fc968/cf:q/1734036109768/Put/seqid=0 2024-12-12T20:41:50,038 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=211}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/67760c03f7f8f9e3deb21b2777ea6a3e/.tmp/cf/6469c87665b342b0867a203ade8bd402 is 71, key is 1eb88bf803820e65bb2f901494537121/cf:q/1734036109768/Put/seqid=0 2024-12-12T20:41:50,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742336_1512 (size=5288) 2024-12-12T20:41:50,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742336_1512 (size=5288) 2024-12-12T20:41:50,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742336_1512 (size=5288) 2024-12-12T20:41:50,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742335_1511 (size=8326) 2024-12-12T20:41:50,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742335_1511 (size=8326) 2024-12-12T20:41:50,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742335_1511 (size=8326) 2024-12-12T20:41:50,047 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=210}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/d78bfcfd6b10390d29a02e632b308ca0/.tmp/cf/f4bb7985481845d1bd664a0f5d732c02 2024-12-12T20:41:50,048 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=211}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/67760c03f7f8f9e3deb21b2777ea6a3e/.tmp/cf/6469c87665b342b0867a203ade8bd402 2024-12-12T20:41:50,053 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=210}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/d78bfcfd6b10390d29a02e632b308ca0/.tmp/cf/f4bb7985481845d1bd664a0f5d732c02 as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/d78bfcfd6b10390d29a02e632b308ca0/cf/f4bb7985481845d1bd664a0f5d732c02 2024-12-12T20:41:50,054 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=211}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/67760c03f7f8f9e3deb21b2777ea6a3e/.tmp/cf/6469c87665b342b0867a203ade8bd402 as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/67760c03f7f8f9e3deb21b2777ea6a3e/cf/6469c87665b342b0867a203ade8bd402 2024-12-12T20:41:50,059 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=211}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/67760c03f7f8f9e3deb21b2777ea6a3e/cf/6469c87665b342b0867a203ade8bd402, entries=47, sequenceid=6, filesize=8.1 K 2024-12-12T20:41:50,060 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=211}] regionserver.HRegion(3040): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 67760c03f7f8f9e3deb21b2777ea6a3e in 41ms, sequenceid=6, compaction requested=false 2024-12-12T20:41:50,060 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=211}] regionserver.HRegion(2538): Flush status journal for 67760c03f7f8f9e3deb21b2777ea6a3e: 2024-12-12T20:41:50,060 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=211}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-12T20:41:50,061 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=211}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T20:41:50,061 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=211}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:41:50,061 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=211}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/67760c03f7f8f9e3deb21b2777ea6a3e/cf/6469c87665b342b0867a203ade8bd402] hfiles 2024-12-12T20:41:50,061 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=211}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/67760c03f7f8f9e3deb21b2777ea6a3e/cf/6469c87665b342b0867a203ade8bd402 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T20:41:50,062 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=210}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/d78bfcfd6b10390d29a02e632b308ca0/cf/f4bb7985481845d1bd664a0f5d732c02, entries=3, sequenceid=6, filesize=5.2 K 2024-12-12T20:41:50,062 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=210}] regionserver.HRegion(3040): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for d78bfcfd6b10390d29a02e632b308ca0 in 43ms, sequenceid=6, compaction requested=false 2024-12-12T20:41:50,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=210}] regionserver.HRegion(2538): Flush status journal for d78bfcfd6b10390d29a02e632b308ca0: 2024-12-12T20:41:50,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=210}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-12T20:41:50,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=210}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T20:41:50,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=210}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T20:41:50,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=210}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/d78bfcfd6b10390d29a02e632b308ca0/cf/f4bb7985481845d1bd664a0f5d732c02] hfiles 2024-12-12T20:41:50,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=210}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/d78bfcfd6b10390d29a02e632b308ca0/cf/f4bb7985481845d1bd664a0f5d732c02 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T20:41:50,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742337_1513 (size=121) 2024-12-12T20:41:50,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742337_1513 (size=121) 2024-12-12T20:41:50,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742337_1513 (size=121) 2024-12-12T20:41:50,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742338_1514 (size=121) 2024-12-12T20:41:50,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742338_1514 (size=121) 2024-12-12T20:41:50,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742338_1514 (size=121) 2024-12-12T20:41:50,070 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=210}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0. 
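The flush/commit/manifest entries above describe where the data ends up on HDFS: each region's memstore is flushed to a temporary hfile, committed under the region's cf/ directory, and the snapshot manifest records region-info plus references to those hfiles (the completed manifest is moved under .hbase-snapshot/<name> a few entries later). A small sketch that inspects that layout with the plain Hadoop FileSystem API, using paths copied from the log; the class name is a placeholder and the listing is purely illustrative:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListSnapshotLayoutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:37615"), conf);
        // Test-data root taken from the log; on a real cluster this would be hbase.rootdir.
        Path root = new Path("/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427");
        // Committed store files for one region: data/default/<table>/<region>/cf/<hfile>
        Path cfDir = new Path(root,
            "data/default/testtb-testExportFileSystemStateWithSkipTmp/"
            + "67760c03f7f8f9e3deb21b2777ea6a3e/cf");
        for (FileStatus f : fs.listStatus(cfDir)) {
          System.out.println("store file: " + f.getPath() + " (" + f.getLen() + " bytes)");
        }
        // Completed snapshot manifest (region-info plus hfile references).
        Path snapshotDir = new Path(root,
            ".hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp");
        for (FileStatus f : fs.listStatus(snapshotDir)) {
          System.out.println("manifest entry: " + f.getPath());
        }
      }
    }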
2024-12-12T20:41:50,070 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=210}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=210 2024-12-12T20:41:50,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=210 2024-12-12T20:41:50,071 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region d78bfcfd6b10390d29a02e632b308ca0 2024-12-12T20:41:50,071 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=210, ppid=209, state=RUNNABLE; SnapshotRegionProcedure d78bfcfd6b10390d29a02e632b308ca0 2024-12-12T20:41:50,075 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=210, ppid=209, state=SUCCESS; SnapshotRegionProcedure d78bfcfd6b10390d29a02e632b308ca0 in 208 msec 2024-12-12T20:41:50,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-12T20:41:50,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-12T20:41:50,469 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=211}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e. 2024-12-12T20:41:50,469 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/0a737713f6e3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=211}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=211 2024-12-12T20:41:50,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster(4106): Remote procedure done, pid=211 2024-12-12T20:41:50,470 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 67760c03f7f8f9e3deb21b2777ea6a3e 2024-12-12T20:41:50,470 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=211, ppid=209, state=RUNNABLE; SnapshotRegionProcedure 67760c03f7f8f9e3deb21b2777ea6a3e 2024-12-12T20:41:50,474 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=211, resume processing ppid=209 2024-12-12T20:41:50,474 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=211, ppid=209, state=SUCCESS; SnapshotRegionProcedure 67760c03f7f8f9e3deb21b2777ea6a3e in 605 msec 2024-12-12T20:41:50,474 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=209, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=209, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T20:41:50,475 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=209, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=209, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T20:41:50,476 INFO [PEWorker-5 
{}] procedure.SnapshotProcedure(134): pid=209, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=209, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T20:41:50,476 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T20:41:50,477 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T20:41:50,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742339_1515 (size=663) 2024-12-12T20:41:50,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742339_1515 (size=663) 2024-12-12T20:41:50,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742339_1515 (size=663) 2024-12-12T20:41:50,489 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=209, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=209, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T20:41:50,494 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=209, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=209, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T20:41:50,494 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T20:41:50,495 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=209, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=209, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T20:41:50,495 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 209 2024-12-12T20:41:50,497 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=209, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=209, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 640 msec 2024-12-12T20:41:50,861 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): 
Region d5a17aa0c520a1cdf2abb50af9af5441, had cached 0 bytes from a total of 8324 2024-12-12T20:41:50,880 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1c36c2e04ccc3d3187435c7e0ba7d6c6, had cached 0 bytes from a total of 5288 2024-12-12T20:41:50,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-12T20:41:50,963 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 209 completed 2024-12-12T20:41:50,963 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036110963 2024-12-12T20:41:50,963 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:37615, tgtDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036110963, rawTgtDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036110963, srcFsUri=hdfs://localhost:37615, srcDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:41:50,996 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:37615, inputRoot=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427 2024-12-12T20:41:50,996 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1165987477_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036110963, skipTmp=true, initialOutputSnapshotDir=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036110963/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T20:41:50,998 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 
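At this point the populated snapshot (pid=209) has completed and the test builds an ExportSnapshot job with the parameters logged above (same source root, HDFS target under export-test/export-1734036110963, skipTmp=true). A sketch of driving the same tool programmatically via ToolRunner; the snapshot name, target path and -skip-tmp flag mirror the log, while the wrapper class is a placeholder and not how the test itself invokes it:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // -skip-tmp writes directly into the target .hbase-snapshot dir instead of a .tmp staging dir.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
            "-copy-to", "hdfs://localhost:37615/user/jenkins/test-data/"
                + "943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036110963",
            "-skip-tmp"
        });
        System.exit(rc);
      }
    }

The same invocation is normally run from the command line as hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot with the same flags.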
2024-12-12T20:41:51,001 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036110963/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T20:41:51,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742341_1517 (size=198) 2024-12-12T20:41:51,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742341_1517 (size=198) 2024-12-12T20:41:51,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742340_1516 (size=663) 2024-12-12T20:41:51,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742340_1516 (size=663) 2024-12-12T20:41:51,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742340_1516 (size=663) 2024-12-12T20:41:51,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742341_1517 (size=198) 2024-12-12T20:41:51,012 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-12T20:41:51,012 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-12T20:41:51,012 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-12T20:41:51,012 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-12T20:41:51,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T20:41:51,126 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-12-12T20:41:51,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-12T20:41:51,623 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0009_000001 (auth:SIMPLE) from 127.0.0.1:34254 2024-12-12T20:41:51,639 WARN [ContainersLauncher #1 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_0/usercache/jenkins/appcache/application_1734035798001_0009/container_1734035798001_0009_01_000001/launch_container.sh] 2024-12-12T20:41:51,639 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_0/usercache/jenkins/appcache/application_1734035798001_0009/container_1734035798001_0009_01_000001/container_tokens] 2024-12-12T20:41:51,639 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-1_0/usercache/jenkins/appcache/application_1734035798001_0009/container_1734035798001_0009_01_000001/sysfs] 2024-12-12T20:41:51,962 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop-15207062813608274614.jar 2024-12-12T20:41:51,963 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-12T20:41:51,963 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-12T20:41:52,023 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop-12368562879921148891.jar 2024-12-12T20:41:52,024 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-12T20:41:52,024 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-12T20:41:52,024 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-12T20:41:52,024 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-12T20:41:52,024 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-12T20:41:52,025 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-12T20:41:52,025 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-12T20:41:52,025 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-12T20:41:52,025 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-12T20:41:52,026 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-12T20:41:52,026 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-12T20:41:52,026 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-12T20:41:52,026 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-12T20:41:52,026 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-12T20:41:52,026 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 
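The long run of "For class ..., using jar ..." DEBUG entries comes from the MapReduce job setup: TableMapReduceUtil resolves the jar that contains each required class and ships it with the job's distributed cache. A minimal sketch of that call, assuming the public TableMapReduceUtil API (the job name and wrapper class are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-snapshot-sketch");  // hypothetical job name
        // Resolves the containing jar for each HBase/Hadoop dependency class and adds it to
        // the job's classpath (the "tmpjars" property), producing log lines like those above.
        TableMapReduceUtil.addDependencyJars(job);
        System.out.println("tmpjars=" + job.getConfiguration().get("tmpjars"));
      }
    }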
2024-12-12T20:41:52,027 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-12T20:41:52,027 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-12T20:41:52,027 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-12T20:41:52,027 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:41:52,028 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:41:52,028 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:41:52,028 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:41:52,028 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T20:41:52,028 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:41:52,028 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T20:41:52,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742342_1518 (size=127628) 2024-12-12T20:41:52,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742342_1518 (size=127628) 2024-12-12T20:41:52,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36349 is added to blk_1073742342_1518 (size=127628) 2024-12-12T20:41:52,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742343_1519 (size=2172101) 2024-12-12T20:41:52,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742343_1519 (size=2172101) 2024-12-12T20:41:52,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742343_1519 (size=2172101) 2024-12-12T20:41:52,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742344_1520 (size=213228) 2024-12-12T20:41:52,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742344_1520 (size=213228) 2024-12-12T20:41:52,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742344_1520 (size=213228) 2024-12-12T20:41:52,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742345_1521 (size=1877034) 2024-12-12T20:41:52,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742345_1521 (size=1877034) 2024-12-12T20:41:52,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742345_1521 (size=1877034) 2024-12-12T20:41:52,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742346_1522 (size=533455) 2024-12-12T20:41:52,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742346_1522 (size=533455) 2024-12-12T20:41:52,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742346_1522 (size=533455) 2024-12-12T20:41:52,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742347_1523 (size=7280644) 2024-12-12T20:41:52,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742347_1523 (size=7280644) 2024-12-12T20:41:52,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742347_1523 (size=7280644) 2024-12-12T20:41:52,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742348_1524 (size=451756) 2024-12-12T20:41:52,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742348_1524 (size=451756) 2024-12-12T20:41:52,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742348_1524 (size=451756) 2024-12-12T20:41:52,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742349_1525 (size=4188619) 2024-12-12T20:41:52,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:36349 is added to blk_1073742349_1525 (size=4188619) 2024-12-12T20:41:52,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742349_1525 (size=4188619) 2024-12-12T20:41:52,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742350_1526 (size=20406) 2024-12-12T20:41:52,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742350_1526 (size=20406) 2024-12-12T20:41:52,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742350_1526 (size=20406) 2024-12-12T20:41:52,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742351_1527 (size=75495) 2024-12-12T20:41:52,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742351_1527 (size=75495) 2024-12-12T20:41:52,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742351_1527 (size=75495) 2024-12-12T20:41:52,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742352_1528 (size=45609) 2024-12-12T20:41:52,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742352_1528 (size=45609) 2024-12-12T20:41:52,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742352_1528 (size=45609) 2024-12-12T20:41:52,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742353_1529 (size=110084) 2024-12-12T20:41:52,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742353_1529 (size=110084) 2024-12-12T20:41:52,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742353_1529 (size=110084) 2024-12-12T20:41:52,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742354_1530 (size=1323991) 2024-12-12T20:41:52,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742354_1530 (size=1323991) 2024-12-12T20:41:52,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742354_1530 (size=1323991) 2024-12-12T20:41:52,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742355_1531 (size=23076) 2024-12-12T20:41:52,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742355_1531 (size=23076) 2024-12-12T20:41:52,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742355_1531 (size=23076) 2024-12-12T20:41:52,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:36349 is added to blk_1073742356_1532 (size=126803) 2024-12-12T20:41:52,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742356_1532 (size=126803) 2024-12-12T20:41:52,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742356_1532 (size=126803) 2024-12-12T20:41:52,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742357_1533 (size=322274) 2024-12-12T20:41:52,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742357_1533 (size=322274) 2024-12-12T20:41:52,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742357_1533 (size=322274) 2024-12-12T20:41:52,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742358_1534 (size=1832290) 2024-12-12T20:41:52,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742358_1534 (size=1832290) 2024-12-12T20:41:52,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742358_1534 (size=1832290) 2024-12-12T20:41:52,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742359_1535 (size=30081) 2024-12-12T20:41:52,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742359_1535 (size=30081) 2024-12-12T20:41:52,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742359_1535 (size=30081) 2024-12-12T20:41:52,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742360_1536 (size=53616) 2024-12-12T20:41:52,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742360_1536 (size=53616) 2024-12-12T20:41:52,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742360_1536 (size=53616) 2024-12-12T20:41:52,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742361_1537 (size=6350857) 2024-12-12T20:41:52,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742361_1537 (size=6350857) 2024-12-12T20:41:52,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742361_1537 (size=6350857) 2024-12-12T20:41:52,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742362_1538 (size=29229) 2024-12-12T20:41:52,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742362_1538 (size=29229) 2024-12-12T20:41:52,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742362_1538 (size=29229) 2024-12-12T20:41:52,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742363_1539 (size=169089) 2024-12-12T20:41:52,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742363_1539 (size=169089) 2024-12-12T20:41:52,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742363_1539 (size=169089) 2024-12-12T20:41:52,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742364_1540 (size=5175431) 2024-12-12T20:41:52,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742364_1540 (size=5175431) 2024-12-12T20:41:52,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742364_1540 (size=5175431) 2024-12-12T20:41:52,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742365_1541 (size=136454) 2024-12-12T20:41:52,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742365_1541 (size=136454) 2024-12-12T20:41:52,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742365_1541 (size=136454) 2024-12-12T20:41:52,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742366_1542 (size=907861) 2024-12-12T20:41:52,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742366_1542 (size=907861) 2024-12-12T20:41:52,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742366_1542 (size=907861) 2024-12-12T20:41:52,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742367_1543 (size=3317408) 2024-12-12T20:41:52,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742367_1543 (size=3317408) 2024-12-12T20:41:52,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742367_1543 (size=3317408) 2024-12-12T20:41:52,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742368_1544 (size=503880) 2024-12-12T20:41:52,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742368_1544 (size=503880) 2024-12-12T20:41:52,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742368_1544 (size=503880) 2024-12-12T20:41:52,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742369_1545 (size=4695811) 2024-12-12T20:41:52,415 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742369_1545 (size=4695811) 2024-12-12T20:41:52,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742369_1545 (size=4695811) 2024-12-12T20:41:52,664 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T20:41:52,814 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-12T20:41:52,816 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-12-12T20:41:52,821 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-12T20:41:52,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742370_1546 (size=366) 2024-12-12T20:41:52,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742370_1546 (size=366) 2024-12-12T20:41:52,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742370_1546 (size=366) 2024-12-12T20:41:52,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742371_1547 (size=15) 2024-12-12T20:41:52,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742371_1547 (size=15) 2024-12-12T20:41:52,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742371_1547 (size=15) 2024-12-12T20:41:52,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742372_1548 (size=305057) 2024-12-12T20:41:52,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742372_1548 (size=305057) 2024-12-12T20:41:52,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742372_1548 (size=305057) 2024-12-12T20:41:53,405 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T20:41:53,405 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-12T20:41:53,435 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0010_000001 (auth:SIMPLE) from 127.0.0.1:60684 2024-12-12T20:41:59,174 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0010_000001 (auth:SIMPLE) from 127.0.0.1:40334 2024-12-12T20:41:59,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742373_1549 (size=350755) 2024-12-12T20:41:59,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742373_1549 (size=350755) 2024-12-12T20:41:59,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742373_1549 (size=350755) 2024-12-12T20:42:01,398 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0010_000001 (auth:SIMPLE) from 127.0.0.1:48310 2024-12-12T20:42:04,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742374_1550 (size=8326) 2024-12-12T20:42:04,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742374_1550 (size=8326) 2024-12-12T20:42:04,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742374_1550 (size=8326) 2024-12-12T20:42:04,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742375_1551 (size=5288) 2024-12-12T20:42:04,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742375_1551 (size=5288) 2024-12-12T20:42:04,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742375_1551 (size=5288) 2024-12-12T20:42:04,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742376_1552 (size=17455) 2024-12-12T20:42:04,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742376_1552 (size=17455) 2024-12-12T20:42:04,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742376_1552 (size=17455) 2024-12-12T20:42:04,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742377_1553 (size=476) 2024-12-12T20:42:04,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742377_1553 (size=476) 2024-12-12T20:42:04,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742377_1553 (size=476) 2024-12-12T20:42:04,772 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_1/usercache/jenkins/appcache/application_1734035798001_0010/container_1734035798001_0010_01_000002/launch_container.sh] 2024-12-12T20:42:04,772 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_1/usercache/jenkins/appcache/application_1734035798001_0010/container_1734035798001_0010_01_000002/container_tokens] 2024-12-12T20:42:04,772 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_1/usercache/jenkins/appcache/application_1734035798001_0010/container_1734035798001_0010_01_000002/sysfs] 2024-12-12T20:42:04,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742378_1554 (size=17455) 2024-12-12T20:42:04,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742378_1554 (size=17455) 2024-12-12T20:42:04,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742378_1554 (size=17455) 2024-12-12T20:42:04,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742379_1555 (size=350755) 2024-12-12T20:42:04,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742379_1555 (size=350755) 2024-12-12T20:42:04,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742379_1555 (size=350755) 2024-12-12T20:42:04,816 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0010_000001 (auth:SIMPLE) from 127.0.0.1:48326 2024-12-12T20:42:06,548 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-12T20:42:06,549 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
2024-12-12T20:42:06,556 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T20:42:06,556 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-12T20:42:06,557 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-12T20:42:06,557 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1165987477_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T20:42:06,557 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-12T20:42:06,557 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-12T20:42:06,557 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1165987477_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036110963/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036110963/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T20:42:06,558 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036110963/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-12T20:42:06,558 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/export-test/export-1734036110963/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-12T20:42:06,564 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T20:42:06,565 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T20:42:06,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=212, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T20:42:06,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=212 2024-12-12T20:42:06,567 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734036126567"}]},"ts":"1734036126567"} 2024-12-12T20:42:06,568 INFO [PEWorker-2 {}] 
hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-12-12T20:42:06,607 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-12-12T20:42:06,608 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=213, ppid=212, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-12-12T20:42:06,609 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d78bfcfd6b10390d29a02e632b308ca0, UNASSIGN}, {pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=67760c03f7f8f9e3deb21b2777ea6a3e, UNASSIGN}] 2024-12-12T20:42:06,610 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=67760c03f7f8f9e3deb21b2777ea6a3e, UNASSIGN 2024-12-12T20:42:06,610 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d78bfcfd6b10390d29a02e632b308ca0, UNASSIGN 2024-12-12T20:42:06,611 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=214 updating hbase:meta row=d78bfcfd6b10390d29a02e632b308ca0, regionState=CLOSING, regionLocation=0a737713f6e3,36817,1734035782356 2024-12-12T20:42:06,611 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=215 updating hbase:meta row=67760c03f7f8f9e3deb21b2777ea6a3e, regionState=CLOSING, regionLocation=0a737713f6e3,42407,1734035781742 2024-12-12T20:42:06,612 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T20:42:06,612 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=216, ppid=215, state=RUNNABLE; CloseRegionProcedure 67760c03f7f8f9e3deb21b2777ea6a3e, server=0a737713f6e3,42407,1734035781742}] 2024-12-12T20:42:06,612 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T20:42:06,613 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=217, ppid=214, state=RUNNABLE; CloseRegionProcedure d78bfcfd6b10390d29a02e632b308ca0, server=0a737713f6e3,36817,1734035782356}] 2024-12-12T20:42:06,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=212 2024-12-12T20:42:06,764 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,42407,1734035781742 2024-12-12T20:42:06,764 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0a737713f6e3,36817,1734035782356 2024-12-12T20:42:06,764 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=216}] handler.UnassignRegionHandler(124): Close 67760c03f7f8f9e3deb21b2777ea6a3e 2024-12-12T20:42:06,764 INFO 
[RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=217}] handler.UnassignRegionHandler(124): Close d78bfcfd6b10390d29a02e632b308ca0 2024-12-12T20:42:06,765 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=216}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T20:42:06,765 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=217}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T20:42:06,765 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=216}] regionserver.HRegion(1681): Closing 67760c03f7f8f9e3deb21b2777ea6a3e, disabling compactions & flushes 2024-12-12T20:42:06,765 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=217}] regionserver.HRegion(1681): Closing d78bfcfd6b10390d29a02e632b308ca0, disabling compactions & flushes 2024-12-12T20:42:06,765 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=216}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e. 2024-12-12T20:42:06,765 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=217}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0. 2024-12-12T20:42:06,765 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=216}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e. 2024-12-12T20:42:06,765 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=217}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0. 2024-12-12T20:42:06,765 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=216}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e. after waiting 0 ms 2024-12-12T20:42:06,765 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=216}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e. 2024-12-12T20:42:06,765 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=217}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0. after waiting 0 ms 2024-12-12T20:42:06,765 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=217}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0. 
2024-12-12T20:42:06,770 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=216}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/67760c03f7f8f9e3deb21b2777ea6a3e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T20:42:06,771 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=216}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:42:06,771 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=216}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e. 2024-12-12T20:42:06,771 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=216}] regionserver.HRegion(1635): Region close journal for 67760c03f7f8f9e3deb21b2777ea6a3e: 2024-12-12T20:42:06,772 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=216}] handler.UnassignRegionHandler(170): Closed 67760c03f7f8f9e3deb21b2777ea6a3e 2024-12-12T20:42:06,772 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=215 updating hbase:meta row=67760c03f7f8f9e3deb21b2777ea6a3e, regionState=CLOSED 2024-12-12T20:42:06,774 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=216, resume processing ppid=215 2024-12-12T20:42:06,775 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=216, ppid=215, state=SUCCESS; CloseRegionProcedure 67760c03f7f8f9e3deb21b2777ea6a3e, server=0a737713f6e3,42407,1734035781742 in 161 msec 2024-12-12T20:42:06,775 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=215, ppid=213, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=67760c03f7f8f9e3deb21b2777ea6a3e, UNASSIGN in 165 msec 2024-12-12T20:42:06,779 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=217}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/d78bfcfd6b10390d29a02e632b308ca0/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T20:42:06,779 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=217}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:42:06,780 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=217}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0. 
2024-12-12T20:42:06,780 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=217}] regionserver.HRegion(1635): Region close journal for d78bfcfd6b10390d29a02e632b308ca0: 2024-12-12T20:42:06,781 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION, pid=217}] handler.UnassignRegionHandler(170): Closed d78bfcfd6b10390d29a02e632b308ca0 2024-12-12T20:42:06,781 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=214 updating hbase:meta row=d78bfcfd6b10390d29a02e632b308ca0, regionState=CLOSED 2024-12-12T20:42:06,784 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=217, resume processing ppid=214 2024-12-12T20:42:06,784 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=217, ppid=214, state=SUCCESS; CloseRegionProcedure d78bfcfd6b10390d29a02e632b308ca0, server=0a737713f6e3,36817,1734035782356 in 169 msec 2024-12-12T20:42:06,785 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=214, resume processing ppid=213 2024-12-12T20:42:06,785 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=214, ppid=213, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d78bfcfd6b10390d29a02e632b308ca0, UNASSIGN in 175 msec 2024-12-12T20:42:06,786 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=213, resume processing ppid=212 2024-12-12T20:42:06,786 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=213, ppid=212, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 177 msec 2024-12-12T20:42:06,787 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734036126787"}]},"ts":"1734036126787"} 2024-12-12T20:42:06,788 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-12-12T20:42:06,799 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-12-12T20:42:06,800 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=212, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 234 msec 2024-12-12T20:42:06,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=212 2024-12-12T20:42:06,870 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 212 completed 2024-12-12T20:42:06,871 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T20:42:06,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] procedure2.ProcedureExecutor(1098): Stored pid=218, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T20:42:06,872 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=218, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T20:42:06,872 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T20:42:06,873 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=218, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T20:42:06,877 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36817 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T20:42:06,891 DEBUG [HFileArchiver-43 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/d78bfcfd6b10390d29a02e632b308ca0 2024-12-12T20:42:06,895 DEBUG [HFileArchiver-43 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/d78bfcfd6b10390d29a02e632b308ca0/cf, FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/d78bfcfd6b10390d29a02e632b308ca0/recovered.edits] 2024-12-12T20:42:06,895 DEBUG [HFileArchiver-44 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/67760c03f7f8f9e3deb21b2777ea6a3e 2024-12-12T20:42:06,897 DEBUG [HFileArchiver-44 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/67760c03f7f8f9e3deb21b2777ea6a3e/cf, FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/67760c03f7f8f9e3deb21b2777ea6a3e/recovered.edits] 2024-12-12T20:42:06,899 DEBUG [HFileArchiver-40 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/d78bfcfd6b10390d29a02e632b308ca0/cf/f4bb7985481845d1bd664a0f5d732c02 to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/d78bfcfd6b10390d29a02e632b308ca0/cf/f4bb7985481845d1bd664a0f5d732c02 2024-12-12T20:42:06,901 DEBUG [HFileArchiver-39 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/67760c03f7f8f9e3deb21b2777ea6a3e/cf/6469c87665b342b0867a203ade8bd402 to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/67760c03f7f8f9e3deb21b2777ea6a3e/cf/6469c87665b342b0867a203ade8bd402 2024-12-12T20:42:06,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T20:42:06,903 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T20:42:06,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T20:42:06,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T20:42:06,905 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-12T20:42:06,905 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-12T20:42:06,905 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-12T20:42:06,905 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-12T20:42:06,905 DEBUG [HFileArchiver-41 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/d78bfcfd6b10390d29a02e632b308ca0/recovered.edits/9.seqid to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/d78bfcfd6b10390d29a02e632b308ca0/recovered.edits/9.seqid 2024-12-12T20:42:06,910 DEBUG [HFileArchiver-43 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/d78bfcfd6b10390d29a02e632b308ca0 2024-12-12T20:42:06,914 DEBUG [HFileArchiver-37 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/67760c03f7f8f9e3deb21b2777ea6a3e/recovered.edits/9.seqid to hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/67760c03f7f8f9e3deb21b2777ea6a3e/recovered.edits/9.seqid 2024-12-12T20:42:06,915 DEBUG [HFileArchiver-44 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testtb-testExportFileSystemStateWithSkipTmp/67760c03f7f8f9e3deb21b2777ea6a3e 2024-12-12T20:42:06,915 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-12-12T20:42:06,917 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=218, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 
2024-12-12T20:42:06,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T20:42:06,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T20:42:06,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:42:06,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:42:06,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T20:42:06,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T20:42:06,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:42:06,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T20:42:06,921 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:42:06,921 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:42:06,921 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:42:06,921 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T20:42:06,922 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-12-12T20:42:06,922 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=218 2024-12-12T20:42:06,925 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-12-12T20:42:06,927 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=218, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T20:42:06,927 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 2024-12-12T20:42:06,927 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734036126927"}]},"ts":"9223372036854775807"} 2024-12-12T20:42:06,928 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734036126927"}]},"ts":"9223372036854775807"} 2024-12-12T20:42:06,931 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-12T20:42:06,931 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => d78bfcfd6b10390d29a02e632b308ca0, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1734036107451.d78bfcfd6b10390d29a02e632b308ca0.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 67760c03f7f8f9e3deb21b2777ea6a3e, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1734036107451.67760c03f7f8f9e3deb21b2777ea6a3e.', STARTKEY => '1', ENDKEY => ''}] 2024-12-12T20:42:06,931 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 
2024-12-12T20:42:06,931 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734036126931"}]},"ts":"9223372036854775807"} 2024-12-12T20:42:06,937 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-12-12T20:42:06,948 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=218, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T20:42:06,950 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=218, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 77 msec 2024-12-12T20:42:07,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=218 2024-12-12T20:42:07,023 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 218 completed 2024-12-12T20:42:07,032 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" 2024-12-12T20:42:07,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T20:42:07,040 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" 2024-12-12T20:42:07,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T20:42:07,077 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=811 (was 813), OpenFileDescriptor=805 (was 817), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1052 (was 1232), ProcessCount=17 (was 17), AvailableMemoryMB=1250 (was 1591) 2024-12-12T20:42:07,077 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=811 is superior to 500 2024-12-12T20:42:07,077 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2861): Stopping mini mapreduce cluster... 
2024-12-12T20:42:07,090 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f060b3e{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-12T20:42:07,094 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1dd43c4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-12T20:42:07,094 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-12T20:42:07,094 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4d43ef08{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-12T20:42:07,095 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c3310d9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop.log.dir/,STOPPED} 2024-12-12T20:42:10,901 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734035798001_0010_000001 (auth:SIMPLE) from 127.0.0.1:36182 2024-12-12T20:42:10,912 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_0/usercache/jenkins/appcache/application_1734035798001_0010/container_1734035798001_0010_01_000001/launch_container.sh] 2024-12-12T20:42:10,912 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_0/usercache/jenkins/appcache/application_1734035798001_0010/container_1734035798001_0010_01_000001/container_tokens] 2024-12-12T20:42:10,912 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/MiniMRCluster_809723831/yarn-469071605/MiniMRCluster_809723831-localDir-nm-0_0/usercache/jenkins/appcache/application_1734035798001_0010/container_1734035798001_0010_01_000001/sysfs] 2024-12-12T20:42:11,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T20:42:12,296 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T20:42:13,827 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 039d8ca4d0468f0362e2baff27ee7042, had cached 0 bytes from a total of 5642 2024-12-12T20:42:19,301 DEBUG 
[FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T20:42:24,152 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@bd830f5{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-12T20:42:24,156 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4afb2ea7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-12T20:42:24,156 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-12T20:42:24,156 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65442ab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-12T20:42:24,156 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a3ef6b3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop.log.dir/,STOPPED} 2024-12-12T20:42:28,271 DEBUG [master/0a737713f6e3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 039d8ca4d0468f0362e2baff27ee7042 changed from -1.0 to 0.0, refreshing cache 2024-12-12T20:42:35,861 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region d5a17aa0c520a1cdf2abb50af9af5441, had cached 0 bytes from a total of 8324 2024-12-12T20:42:35,880 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1c36c2e04ccc3d3187435c7e0ba7d6c6, had cached 0 bytes from a total of 5288 2024-12-12T20:42:41,177 ERROR [Thread[Thread-420,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-12T20:42:41,178 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@158883e3{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-12T20:42:41,179 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@614a7d80{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-12T20:42:41,179 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-12T20:42:41,179 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2e0bbdd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-12T20:42:41,179 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5195b339{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop.log.dir/,STOPPED} 2024-12-12T20:42:41,201 WARN [ApplicationMaster Launcher {}] 
amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 2024-12-12T20:42:41,218 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-12-12T20:42:41,219 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-12-12T20:42:41,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741830_1006 (size=959611) 2024-12-12T20:42:41,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741830_1006 (size=959611) 2024-12-12T20:42:41,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741830_1006 (size=959611) 2024-12-12T20:42:41,236 ERROR [Thread[Thread-443,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-12T20:42:41,254 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@46f85e31{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-12T20:42:41,255 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@53ed644b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-12T20:42:41,255 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-12T20:42:41,256 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e2866a2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-12T20:42:41,256 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@cce8bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop.log.dir/,STOPPED} 2024-12-12T20:42:41,259 ERROR [Thread[Thread-402,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-12T20:42:41,259 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2864): Mini mapreduce cluster stopped 2024-12-12T20:42:41,259 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-12T20:42:41,259 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-12T20:42:41,259 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x20f4d7a0 to 127.0.0.1:52655 2024-12-12T20:42:41,259 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:42:41,260 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-12T20:42:41,260 DEBUG [Time-limited 
test {}] util.JVMClusterUtil(257): Found active master hash=7239121, stopped=false 2024-12-12T20:42:41,261 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:42:41,261 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-12T20:42:41,261 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=0a737713f6e3,43067,1734035780320 2024-12-12T20:42:41,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-12T20:42:41,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:42:41,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-12T20:42:41,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:42:41,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-12T20:42:41,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-12T20:42:41,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:42:41,311 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-12T20:42:41,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:42:41,311 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:42:41,312 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '0a737713f6e3,42407,1734035781742' ***** 2024-12-12T20:42:41,312 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:42:41,312 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-12T20:42:41,312 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '0a737713f6e3,46859,1734035782031' ***** 2024-12-12T20:42:41,312 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 
2024-12-12T20:42:41,312 INFO [RS:0;0a737713f6e3:42407 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-12T20:42:41,312 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-12T20:42:41,312 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '0a737713f6e3,36817,1734035782356' ***** 2024-12-12T20:42:41,312 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-12T20:42:41,312 INFO [RS:1;0a737713f6e3:46859 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-12T20:42:41,313 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-12T20:42:41,313 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T20:42:41,313 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:42:41,313 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-12T20:42:41,313 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T20:42:41,313 INFO [RS:1;0a737713f6e3:46859 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-12T20:42:41,313 INFO [RS:1;0a737713f6e3:46859 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-12T20:42:41,313 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T20:42:41,313 INFO [RS:0;0a737713f6e3:42407 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-12T20:42:41,313 INFO [RS:1;0a737713f6e3:46859 {}] regionserver.HRegionServer(3579): Received CLOSE for 419ba590b58c1bffeff3e689922ee9a3 2024-12-12T20:42:41,313 INFO [RS:0;0a737713f6e3:42407 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-12T20:42:41,313 INFO [RS:0;0a737713f6e3:42407 {}] regionserver.HRegionServer(3579): Received CLOSE for d5a17aa0c520a1cdf2abb50af9af5441 2024-12-12T20:42:41,314 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T20:42:41,314 INFO [RS:1;0a737713f6e3:46859 {}] regionserver.HRegionServer(1224): stopping server 0a737713f6e3,46859,1734035782031 2024-12-12T20:42:41,314 DEBUG [RS:1;0a737713f6e3:46859 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:42:41,314 INFO [RS:1;0a737713f6e3:46859 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-12T20:42:41,314 INFO [RS:1;0a737713f6e3:46859 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-12T20:42:41,314 INFO [RS:1;0a737713f6e3:46859 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-12T20:42:41,314 INFO [RS:0;0a737713f6e3:42407 {}] regionserver.HRegionServer(1224): stopping server 0a737713f6e3,42407,1734035781742 2024-12-12T20:42:41,314 DEBUG [RS:0;0a737713f6e3:42407 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:42:41,314 INFO [RS:1;0a737713f6e3:46859 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-12T20:42:41,314 INFO [RS:0;0a737713f6e3:42407 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-12-12T20:42:41,314 DEBUG [RS:0;0a737713f6e3:42407 {}] regionserver.HRegionServer(1603): Online Regions={d5a17aa0c520a1cdf2abb50af9af5441=testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441.} 2024-12-12T20:42:41,319 INFO [RS:1;0a737713f6e3:46859 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-12T20:42:41,319 DEBUG [RS:1;0a737713f6e3:46859 {}] regionserver.HRegionServer(1603): Online Regions={419ba590b58c1bffeff3e689922ee9a3=hbase:namespace,,1734035787822.419ba590b58c1bffeff3e689922ee9a3., 1588230740=hbase:meta,,1.1588230740} 2024-12-12T20:42:41,319 INFO [RS:2;0a737713f6e3:36817 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-12T20:42:41,319 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-12T20:42:41,320 INFO [RS:2;0a737713f6e3:36817 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-12T20:42:41,320 INFO [RS:2;0a737713f6e3:36817 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-12T20:42:41,320 INFO [RS:2;0a737713f6e3:36817 {}] regionserver.HRegionServer(3579): Received CLOSE for 039d8ca4d0468f0362e2baff27ee7042 2024-12-12T20:42:41,320 INFO [RS:2;0a737713f6e3:36817 {}] regionserver.HRegionServer(3579): Received CLOSE for 1c36c2e04ccc3d3187435c7e0ba7d6c6 2024-12-12T20:42:41,320 INFO [RS:2;0a737713f6e3:36817 {}] regionserver.HRegionServer(1224): stopping server 0a737713f6e3,36817,1734035782356 2024-12-12T20:42:41,320 DEBUG [RS:2;0a737713f6e3:36817 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:42:41,320 INFO [RS:2;0a737713f6e3:36817 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-12T20:42:41,320 DEBUG [RS:2;0a737713f6e3:36817 {}] regionserver.HRegionServer(1603): Online Regions={039d8ca4d0468f0362e2baff27ee7042=hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042., 1c36c2e04ccc3d3187435c7e0ba7d6c6=testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6.} 2024-12-12T20:42:41,320 DEBUG [RS:2;0a737713f6e3:36817 {}] regionserver.HRegionServer(1629): Waiting on 039d8ca4d0468f0362e2baff27ee7042, 1c36c2e04ccc3d3187435c7e0ba7d6c6 2024-12-12T20:42:41,320 DEBUG [RS:0;0a737713f6e3:42407 {}] regionserver.HRegionServer(1629): Waiting on d5a17aa0c520a1cdf2abb50af9af5441 2024-12-12T20:42:41,320 DEBUG [RS_CLOSE_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-12T20:42:41,321 INFO [RS_CLOSE_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-12T20:42:41,321 DEBUG [RS_CLOSE_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-12T20:42:41,321 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 419ba590b58c1bffeff3e689922ee9a3, disabling compactions & flushes 2024-12-12T20:42:41,321 DEBUG [RS_CLOSE_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-12T20:42:41,321 DEBUG [RS_CLOSE_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-12T20:42:41,321 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734035787822.419ba590b58c1bffeff3e689922ee9a3. 2024-12-12T20:42:41,321 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734035787822.419ba590b58c1bffeff3e689922ee9a3. 2024-12-12T20:42:41,321 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734035787822.419ba590b58c1bffeff3e689922ee9a3. after waiting 0 ms 2024-12-12T20:42:41,321 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734035787822.419ba590b58c1bffeff3e689922ee9a3. 2024-12-12T20:42:41,321 INFO [RS_CLOSE_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=70.06 KB heapSize=111.35 KB 2024-12-12T20:42:41,321 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 419ba590b58c1bffeff3e689922ee9a3 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-12T20:42:41,322 DEBUG [RS:1;0a737713f6e3:46859 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 419ba590b58c1bffeff3e689922ee9a3 2024-12-12T20:42:41,322 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing d5a17aa0c520a1cdf2abb50af9af5441, disabling compactions & flushes 2024-12-12T20:42:41,322 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441. 2024-12-12T20:42:41,322 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441. 2024-12-12T20:42:41,322 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441. after waiting 0 ms 2024-12-12T20:42:41,322 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441. 
2024-12-12T20:42:41,323 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 039d8ca4d0468f0362e2baff27ee7042, disabling compactions & flushes 2024-12-12T20:42:41,323 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042. 2024-12-12T20:42:41,323 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042. 2024-12-12T20:42:41,323 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042. after waiting 0 ms 2024-12-12T20:42:41,323 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042. 2024-12-12T20:42:41,323 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 039d8ca4d0468f0362e2baff27ee7042 1/1 column families, dataSize=190 B heapSize=672 B 2024-12-12T20:42:41,331 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportExpiredSnapshot/d5a17aa0c520a1cdf2abb50af9af5441/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-12T20:42:41,331 INFO [regionserver/0a737713f6e3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T20:42:41,332 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:42:41,332 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441. 2024-12-12T20:42:41,332 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for d5a17aa0c520a1cdf2abb50af9af5441: 2024-12-12T20:42:41,332 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441. 
2024-12-12T20:42:41,373 INFO [regionserver/0a737713f6e3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T20:42:41,373 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/acl/039d8ca4d0468f0362e2baff27ee7042/.tmp/l/fffe9f1a61e6469aa7d1aa438ca42679 is 68, key is testtb-testExportFileSystemStateWithSkipTmp/l:/1734036126873/DeleteFamily/seqid=0 2024-12-12T20:42:41,384 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/namespace/419ba590b58c1bffeff3e689922ee9a3/.tmp/info/f9727575866849f68208f5235938a3c1 is 45, key is default/info:d/1734035788857/Put/seqid=0 2024-12-12T20:42:41,386 INFO [regionserver/0a737713f6e3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T20:42:41,400 DEBUG [RS_CLOSE_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/meta/1588230740/.tmp/info/6c660e59e066442aaed3bbc6c5ff05d7 is 173, key is testExportExpiredSnapshot,1,1734036020420.d5a17aa0c520a1cdf2abb50af9af5441./info:regioninfo/1734036020915/Put/seqid=0 2024-12-12T20:42:41,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742380_1556 (size=5142) 2024-12-12T20:42:41,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742380_1556 (size=5142) 2024-12-12T20:42:41,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742380_1556 (size=5142) 2024-12-12T20:42:41,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742381_1557 (size=5037) 2024-12-12T20:42:41,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742381_1557 (size=5037) 2024-12-12T20:42:41,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742381_1557 (size=5037) 2024-12-12T20:42:41,449 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/namespace/419ba590b58c1bffeff3e689922ee9a3/.tmp/info/f9727575866849f68208f5235938a3c1 2024-12-12T20:42:41,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742382_1558 (size=16197) 2024-12-12T20:42:41,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742382_1558 (size=16197) 2024-12-12T20:42:41,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742382_1558 (size=16197) 2024-12-12T20:42:41,453 INFO 
[RS_CLOSE_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=61.66 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/meta/1588230740/.tmp/info/6c660e59e066442aaed3bbc6c5ff05d7 2024-12-12T20:42:41,456 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/namespace/419ba590b58c1bffeff3e689922ee9a3/.tmp/info/f9727575866849f68208f5235938a3c1 as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/namespace/419ba590b58c1bffeff3e689922ee9a3/info/f9727575866849f68208f5235938a3c1 2024-12-12T20:42:41,462 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/namespace/419ba590b58c1bffeff3e689922ee9a3/info/f9727575866849f68208f5235938a3c1, entries=2, sequenceid=6, filesize=4.9 K 2024-12-12T20:42:41,463 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 419ba590b58c1bffeff3e689922ee9a3 in 142ms, sequenceid=6, compaction requested=false 2024-12-12T20:42:41,470 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/namespace/419ba590b58c1bffeff3e689922ee9a3/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T20:42:41,470 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:42:41,470 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1734035787822.419ba590b58c1bffeff3e689922ee9a3. 2024-12-12T20:42:41,470 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 419ba590b58c1bffeff3e689922ee9a3: 2024-12-12T20:42:41,471 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1734035787822.419ba590b58c1bffeff3e689922ee9a3. 
2024-12-12T20:42:41,473 DEBUG [RS_CLOSE_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/meta/1588230740/.tmp/rep_barrier/765a54f97d74487086008818bf86df79 is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986057.04bdedca79d2c7a6c82ea3b440f67379./rep_barrier:/1734036017179/DeleteFamily/seqid=0 2024-12-12T20:42:41,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742383_1559 (size=8007) 2024-12-12T20:42:41,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742383_1559 (size=8007) 2024-12-12T20:42:41,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742383_1559 (size=8007) 2024-12-12T20:42:41,481 INFO [RS_CLOSE_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.34 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/meta/1588230740/.tmp/rep_barrier/765a54f97d74487086008818bf86df79 2024-12-12T20:42:41,498 DEBUG [RS_CLOSE_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/meta/1588230740/.tmp/table/fb46f19cead247cb9cf7a0dc6ced10a9 is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1734035986057.04bdedca79d2c7a6c82ea3b440f67379./table:/1734036017179/DeleteFamily/seqid=0 2024-12-12T20:42:41,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073742384_1560 (size=8861) 2024-12-12T20:42:41,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073742384_1560 (size=8861) 2024-12-12T20:42:41,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073742384_1560 (size=8861) 2024-12-12T20:42:41,502 INFO [RS_CLOSE_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.06 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/meta/1588230740/.tmp/table/fb46f19cead247cb9cf7a0dc6ced10a9 2024-12-12T20:42:41,506 DEBUG [RS_CLOSE_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/meta/1588230740/.tmp/info/6c660e59e066442aaed3bbc6c5ff05d7 as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/meta/1588230740/info/6c660e59e066442aaed3bbc6c5ff05d7 2024-12-12T20:42:41,509 INFO [RS_CLOSE_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/meta/1588230740/info/6c660e59e066442aaed3bbc6c5ff05d7, entries=89, sequenceid=206, 
filesize=15.8 K 2024-12-12T20:42:41,509 DEBUG [RS_CLOSE_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/meta/1588230740/.tmp/rep_barrier/765a54f97d74487086008818bf86df79 as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/meta/1588230740/rep_barrier/765a54f97d74487086008818bf86df79 2024-12-12T20:42:41,513 INFO [RS_CLOSE_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/meta/1588230740/rep_barrier/765a54f97d74487086008818bf86df79, entries=21, sequenceid=206, filesize=7.8 K 2024-12-12T20:42:41,514 DEBUG [RS_CLOSE_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/meta/1588230740/.tmp/table/fb46f19cead247cb9cf7a0dc6ced10a9 as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/meta/1588230740/table/fb46f19cead247cb9cf7a0dc6ced10a9 2024-12-12T20:42:41,520 INFO [RS_CLOSE_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/meta/1588230740/table/fb46f19cead247cb9cf7a0dc6ced10a9, entries=38, sequenceid=206, filesize=8.7 K 2024-12-12T20:42:41,521 INFO [RS:0;0a737713f6e3:42407 {}] regionserver.HRegionServer(1250): stopping server 0a737713f6e3,42407,1734035781742; all regions closed. 
2024-12-12T20:42:41,521 DEBUG [RS:2;0a737713f6e3:36817 {}] regionserver.HRegionServer(1629): Waiting on 039d8ca4d0468f0362e2baff27ee7042, 1c36c2e04ccc3d3187435c7e0ba7d6c6 2024-12-12T20:42:41,522 INFO [RS_CLOSE_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~70.06 KB/71741, heapSize ~111.30 KB/113976, currentSize=0 B/0 for 1588230740 in 201ms, sequenceid=206, compaction requested=false 2024-12-12T20:42:41,522 DEBUG [RS:1;0a737713f6e3:46859 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-12T20:42:41,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741834_1010 (size=10762) 2024-12-12T20:42:41,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741834_1010 (size=10762) 2024-12-12T20:42:41,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741834_1010 (size=10762) 2024-12-12T20:42:41,527 DEBUG [RS_CLOSE_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/meta/1588230740/recovered.edits/209.seqid, newMaxSeqId=209, maxSeqId=1 2024-12-12T20:42:41,528 DEBUG [RS_CLOSE_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:42:41,528 DEBUG [RS_CLOSE_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-12T20:42:41,528 INFO [RS_CLOSE_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-12T20:42:41,528 DEBUG [RS_CLOSE_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-12T20:42:41,528 DEBUG [RS_CLOSE_META-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-12T20:42:41,532 DEBUG [RS:0;0a737713f6e3:42407 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/oldWALs 2024-12-12T20:42:41,532 INFO [RS:0;0a737713f6e3:42407 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 0a737713f6e3%2C42407%2C1734035781742:(num 1734035786343) 2024-12-12T20:42:41,532 DEBUG [RS:0;0a737713f6e3:42407 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:42:41,532 INFO [RS:0;0a737713f6e3:42407 {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T20:42:41,533 INFO [RS:0;0a737713f6e3:42407 {}] hbase.ChoreService(370): Chore service for: regionserver/0a737713f6e3:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-12T20:42:41,533 INFO [RS:0;0a737713f6e3:42407 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-12T20:42:41,533 INFO [RS:0;0a737713f6e3:42407 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-12-12T20:42:41,533 INFO [RS:0;0a737713f6e3:42407 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-12T20:42:41,533 INFO [regionserver/0a737713f6e3:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-12T20:42:41,533 INFO [RS:0;0a737713f6e3:42407 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:42407 2024-12-12T20:42:41,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-12T20:42:41,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0a737713f6e3,42407,1734035781742 2024-12-12T20:42:41,552 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0a737713f6e3,42407,1734035781742] 2024-12-12T20:42:41,552 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 0a737713f6e3,42407,1734035781742; numProcessing=1 2024-12-12T20:42:41,560 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/0a737713f6e3,42407,1734035781742 already deleted, retry=false 2024-12-12T20:42:41,560 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 0a737713f6e3,42407,1734035781742 expired; onlineServers=2 2024-12-12T20:42:41,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T20:42:41,652 INFO [RS:0;0a737713f6e3:42407 {}] regionserver.HRegionServer(1307): Exiting; stopping=0a737713f6e3,42407,1734035781742; zookeeper connection closed. 2024-12-12T20:42:41,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42407-0x1001bf54bb60001, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T20:42:41,652 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@60016a3 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@60016a3 2024-12-12T20:42:41,721 DEBUG [RS:2;0a737713f6e3:36817 {}] regionserver.HRegionServer(1629): Waiting on 039d8ca4d0468f0362e2baff27ee7042, 1c36c2e04ccc3d3187435c7e0ba7d6c6 2024-12-12T20:42:41,722 INFO [RS:1;0a737713f6e3:46859 {}] regionserver.HRegionServer(1250): stopping server 0a737713f6e3,46859,1734035782031; all regions closed. 
2024-12-12T20:42:41,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741836_1012 (size=82323) 2024-12-12T20:42:41,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741836_1012 (size=82323) 2024-12-12T20:42:41,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741836_1012 (size=82323) 2024-12-12T20:42:41,729 DEBUG [RS:1;0a737713f6e3:46859 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/oldWALs 2024-12-12T20:42:41,729 INFO [RS:1;0a737713f6e3:46859 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 0a737713f6e3%2C46859%2C1734035782031.meta:.meta(num 1734035787058) 2024-12-12T20:42:41,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741835_1011 (size=16634) 2024-12-12T20:42:41,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741835_1011 (size=16634) 2024-12-12T20:42:41,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741835_1011 (size=16634) 2024-12-12T20:42:41,737 DEBUG [RS:1;0a737713f6e3:46859 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/oldWALs 2024-12-12T20:42:41,737 INFO [RS:1;0a737713f6e3:46859 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 0a737713f6e3%2C46859%2C1734035782031:(num 1734035786353) 2024-12-12T20:42:41,737 DEBUG [RS:1;0a737713f6e3:46859 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:42:41,737 INFO [RS:1;0a737713f6e3:46859 {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T20:42:41,737 INFO [RS:1;0a737713f6e3:46859 {}] hbase.ChoreService(370): Chore service for: regionserver/0a737713f6e3:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-12T20:42:41,737 INFO [regionserver/0a737713f6e3:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-12T20:42:41,737 INFO [RS:1;0a737713f6e3:46859 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:46859 2024-12-12T20:42:41,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0a737713f6e3,46859,1734035782031 2024-12-12T20:42:41,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-12T20:42:41,757 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0a737713f6e3,46859,1734035782031] 2024-12-12T20:42:41,757 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 0a737713f6e3,46859,1734035782031; numProcessing=2 2024-12-12T20:42:41,765 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/0a737713f6e3,46859,1734035782031 already deleted, retry=false 2024-12-12T20:42:41,765 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 0a737713f6e3,46859,1734035782031 expired; onlineServers=1 2024-12-12T20:42:41,840 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=190 B at sequenceid=33 (bloomFilter=false), to=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/acl/039d8ca4d0468f0362e2baff27ee7042/.tmp/l/fffe9f1a61e6469aa7d1aa438ca42679 2024-12-12T20:42:41,845 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for fffe9f1a61e6469aa7d1aa438ca42679 2024-12-12T20:42:41,846 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/acl/039d8ca4d0468f0362e2baff27ee7042/.tmp/l/fffe9f1a61e6469aa7d1aa438ca42679 as hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/acl/039d8ca4d0468f0362e2baff27ee7042/l/fffe9f1a61e6469aa7d1aa438ca42679 2024-12-12T20:42:41,852 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for fffe9f1a61e6469aa7d1aa438ca42679 2024-12-12T20:42:41,852 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/acl/039d8ca4d0468f0362e2baff27ee7042/l/fffe9f1a61e6469aa7d1aa438ca42679, entries=2, sequenceid=33, filesize=5.0 K 2024-12-12T20:42:41,853 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~190 B/190, heapSize ~656 B/656, currentSize=0 B/0 for 039d8ca4d0468f0362e2baff27ee7042 in 530ms, sequenceid=33, compaction requested=false 2024-12-12T20:42:41,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, 
type=None, state=Closed, path=null 2024-12-12T20:42:41,857 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/hbase/acl/039d8ca4d0468f0362e2baff27ee7042/recovered.edits/36.seqid, newMaxSeqId=36, maxSeqId=27 2024-12-12T20:42:41,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46859-0x1001bf54bb60002, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T20:42:41,858 INFO [RS:1;0a737713f6e3:46859 {}] regionserver.HRegionServer(1307): Exiting; stopping=0a737713f6e3,46859,1734035782031; zookeeper connection closed. 2024-12-12T20:42:41,858 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@526ce1f0 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@526ce1f0 2024-12-12T20:42:41,863 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:42:41,863 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042. 2024-12-12T20:42:41,864 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 039d8ca4d0468f0362e2baff27ee7042: 2024-12-12T20:42:41,864 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1734035789115.039d8ca4d0468f0362e2baff27ee7042. 2024-12-12T20:42:41,864 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 1c36c2e04ccc3d3187435c7e0ba7d6c6, disabling compactions & flushes 2024-12-12T20:42:41,864 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6. 2024-12-12T20:42:41,864 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6. 2024-12-12T20:42:41,864 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6. after waiting 0 ms 2024-12-12T20:42:41,864 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6. 
2024-12-12T20:42:41,867 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/data/default/testExportExpiredSnapshot/1c36c2e04ccc3d3187435c7e0ba7d6c6/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-12T20:42:41,868 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:42:41,868 INFO [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6. 2024-12-12T20:42:41,868 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 1c36c2e04ccc3d3187435c7e0ba7d6c6: 2024-12-12T20:42:41,868 DEBUG [RS_CLOSE_REGION-regionserver/0a737713f6e3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1734036020420.1c36c2e04ccc3d3187435c7e0ba7d6c6. 2024-12-12T20:42:41,921 INFO [RS:2;0a737713f6e3:36817 {}] regionserver.HRegionServer(1250): stopping server 0a737713f6e3,36817,1734035782356; all regions closed. 2024-12-12T20:42:41,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41061 is added to blk_1073741833_1009 (size=11925) 2024-12-12T20:42:41,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36349 is added to blk_1073741833_1009 (size=11925) 2024-12-12T20:42:41,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32781 is added to blk_1073741833_1009 (size=11925) 2024-12-12T20:42:41,940 DEBUG [RS:2;0a737713f6e3:36817 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/oldWALs 2024-12-12T20:42:41,941 INFO [RS:2;0a737713f6e3:36817 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 0a737713f6e3%2C36817%2C1734035782356:(num 1734035786342) 2024-12-12T20:42:41,941 DEBUG [RS:2;0a737713f6e3:36817 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:42:41,941 INFO [RS:2;0a737713f6e3:36817 {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T20:42:41,941 INFO [RS:2;0a737713f6e3:36817 {}] hbase.ChoreService(370): Chore service for: regionserver/0a737713f6e3:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-12T20:42:41,941 INFO [RS:2;0a737713f6e3:36817 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-12T20:42:41,941 INFO [RS:2;0a737713f6e3:36817 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-12T20:42:41,941 INFO [RS:2;0a737713f6e3:36817 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-12T20:42:41,941 INFO [regionserver/0a737713f6e3:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-12T20:42:41,941 INFO [RS:2;0a737713f6e3:36817 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:36817 2024-12-12T20:42:41,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-12T20:42:41,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0a737713f6e3,36817,1734035782356 2024-12-12T20:42:41,960 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0a737713f6e3,36817,1734035782356] 2024-12-12T20:42:41,960 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 0a737713f6e3,36817,1734035782356; numProcessing=3 2024-12-12T20:42:41,968 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/0a737713f6e3,36817,1734035782356 already deleted, retry=false 2024-12-12T20:42:41,968 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 0a737713f6e3,36817,1734035782356 expired; onlineServers=0 2024-12-12T20:42:41,968 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '0a737713f6e3,43067,1734035780320' ***** 2024-12-12T20:42:41,968 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-12T20:42:41,969 DEBUG [M:0;0a737713f6e3:43067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a08e883, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0a737713f6e3/172.17.0.2:0 2024-12-12T20:42:41,969 INFO [M:0;0a737713f6e3:43067 {}] regionserver.HRegionServer(1224): stopping server 0a737713f6e3,43067,1734035780320 2024-12-12T20:42:41,969 INFO [M:0;0a737713f6e3:43067 {}] regionserver.HRegionServer(1250): stopping server 0a737713f6e3,43067,1734035780320; all regions closed. 2024-12-12T20:42:41,969 DEBUG [M:0;0a737713f6e3:43067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T20:42:41,969 DEBUG [M:0;0a737713f6e3:43067 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-12T20:42:41,969 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-12T20:42:41,969 DEBUG [M:0;0a737713f6e3:43067 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-12T20:42:41,969 DEBUG [master/0a737713f6e3:0:becomeActiveMaster-HFileCleaner.large.0-1734035785526 {}] cleaner.HFileCleaner(306): Exit Thread[master/0a737713f6e3:0:becomeActiveMaster-HFileCleaner.large.0-1734035785526,5,FailOnTimeoutGroup] 2024-12-12T20:42:41,969 DEBUG [master/0a737713f6e3:0:becomeActiveMaster-HFileCleaner.small.0-1734035785536 {}] cleaner.HFileCleaner(306): Exit Thread[master/0a737713f6e3:0:becomeActiveMaster-HFileCleaner.small.0-1734035785536,5,FailOnTimeoutGroup] 2024-12-12T20:42:41,970 INFO [M:0;0a737713f6e3:43067 {}] hbase.ChoreService(370): Chore service for: master/0a737713f6e3:0 had [] on shutdown 2024-12-12T20:42:41,970 DEBUG [M:0;0a737713f6e3:43067 {}] master.HMaster(1733): Stopping service threads 2024-12-12T20:42:41,970 INFO [M:0;0a737713f6e3:43067 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-12T20:42:41,971 INFO [M:0;0a737713f6e3:43067 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-12T20:42:41,971 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-12T20:42:41,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-12T20:42:41,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T20:42:41,977 DEBUG [M:0;0a737713f6e3:43067 {}] zookeeper.ZKUtil(347): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-12T20:42:41,977 WARN [M:0;0a737713f6e3:43067 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-12T20:42:41,977 INFO [M:0;0a737713f6e3:43067 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-12T20:42:41,977 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-12T20:42:41,977 INFO [M:0;0a737713f6e3:43067 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-12T20:42:41,978 DEBUG [M:0;0a737713f6e3:43067 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-12T20:42:41,993 INFO [M:0;0a737713f6e3:43067 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T20:42:41,994 DEBUG [M:0;0a737713f6e3:43067 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T20:42:41,994 DEBUG [M:0;0a737713f6e3:43067 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-12T20:42:41,994 DEBUG [M:0;0a737713f6e3:43067 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T20:42:41,994 INFO [M:0;0a737713f6e3:43067 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=815.47 KB heapSize=980.30 KB 2024-12-12T20:42:41,994 ERROR [AsyncFSWAL-0-hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/MasterData-prefix:0a737713f6e3,43067,1734035780320 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/MasterData-prefix:0a737713f6e3,43067,1734035780320,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:419) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:132) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:830) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:128) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1148) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.appendAndSync(AsyncFSWAL.java:500) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.consume(AsyncFSWAL.java:603) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T20:42:42,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T20:42:42,060 INFO [RS:2;0a737713f6e3:36817 {}] regionserver.HRegionServer(1307): Exiting; stopping=0a737713f6e3,36817,1734035782356; zookeeper connection closed. 2024-12-12T20:42:42,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36817-0x1001bf54bb60003, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T20:42:42,060 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5431e7d7 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5431e7d7 2024-12-12T20:42:42,061 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-12T20:42:46,848 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T20:42:49,301 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-12T20:42:51,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:42:51,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-12T20:42:51,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-12T20:42:51,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-12T20:42:51,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:42:51,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-12T20:42:51,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T20:42:51,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-12T20:42:51,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-12T20:42:56,628 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T20:43:19,302 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;0a737713f6e3:43067 227 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 11 Waited count: 12 Waiting on java.lang.ref.ReferenceQueue$Lock@2d4bc92b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 12 Waited count: 17 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 21 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41222f30 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 3 Waited count: 26 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4501 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 46 Waiting on java.util.concurrent.CountDownLatch$Sync@27f2d048 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11952 Waited count: 12552 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 9 Waited count: 10 Waiting on java.lang.ref.ReferenceQueue$Lock@7e9e8767 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@2e956c2b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@15b6454e): State: TIMED_WAITING Blocked count: 0 Waited count: 894 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 90 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp912985739-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp912985739-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp912985739-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp912985739-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp912985739-41-acceptor-0@5a42dadc-ServerConnector@47697099{HTTP/1.1, (http/1.1)}{localhost:33397}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp912985739-42): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp912985739-43): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp912985739-44): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-c85103e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 21 Waited count: 2918 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@21a66961 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37615): State: TIMED_WAITING Blocked count: 1 Waited 
count: 46 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 90 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5a042424): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 152 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4da42864): State: TIMED_WAITING Blocked count: 0 Waited count: 90 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 151 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 42575 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1295 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79cbbdb9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37615): State: TIMED_WAITING Blocked count: 76 Waited count: 2170 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37615): State: TIMED_WAITING Blocked count: 87 Waited count: 2180 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37615): State: TIMED_WAITING Blocked count: 78 Waited count: 2185 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37615): State: TIMED_WAITING Blocked count: 68 Waited count: 2177 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37615): State: TIMED_WAITING Blocked count: 73 Waited count: 2178 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@29a743c3): State: TIMED_WAITING Blocked count: 0 Waited count: 223 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@6a5e8b16): State: TIMED_WAITING Blocked count: 0 Waited count: 90 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@ca0256c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@55cbb73f): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1765406893)): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp655409990-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp655409990-87-acceptor-0@4c0c4250-ServerConnector@40605cbf{HTTP/1.1, (http/1.1)}{localhost:44175}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp655409990-88): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp655409990-89): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-5c2fc490-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4e070830): State: TIMED_WAITING Blocked count: 0 Waited count: 889 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 44071): State: TIMED_WAITING Blocked count: 1 Waited count: 46 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 89 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 242 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38a0806 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1420643299-172.17.0.2-1734035773582 heartbeating to localhost/127.0.0.1:37615): State: TIMED_WAITING Blocked count: 1303 Waited count: 1459 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2c6f9029): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 44071): State: TIMED_WAITING Blocked count: 0 Waited count: 449 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 44071): State: TIMED_WAITING Blocked count: 0 Waited count: 446 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 44071): State: TIMED_WAITING Blocked count: 0 Waited count: 445 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 44071): State: TIMED_WAITING Blocked count: 0 Waited count: 445 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 44071): State: TIMED_WAITING Blocked count: 0 Waited count: 450 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 110 (IPC Client (874193583) connection to localhost/127.0.0.1:37615 from jenkins): State: TIMED_WAITING Blocked count: 1333 Waited count: 1334 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 111 (IPC Parameter Sending Thread for localhost/127.0.0.1:37615): State: TIMED_WAITING Blocked count: 0 Waited count: 1832 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp63234285-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp63234285-121-acceptor-0@4696f95-ServerConnector@70160452{HTTP/1.1, (http/1.1)}{localhost:43199}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp63234285-122): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp63234285-123): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-cc0c5e6-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6b027e55): State: TIMED_WAITING Blocked count: 0 Waited count: 888 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 129 (IPC Server idle connection scanner for port 35687): State: TIMED_WAITING Blocked count: 1 Waited count: 46 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 131 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 89 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 134 (Command processor): State: WAITING Blocked count: 1 Waited count: 286 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@14a64890 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 135 (BP-1420643299-172.17.0.2-1734035773582 heartbeating to localhost/127.0.0.1:37615): State: TIMED_WAITING Blocked count: 1280 Waited count: 1464 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@14340b79): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 127 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 137 (IPC Server handler 0 on default port 35687): State: TIMED_WAITING Blocked count: 0 Waited count: 447 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 138 (IPC Server handler 1 on default port 35687): State: TIMED_WAITING Blocked count: 0 Waited count: 449 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 2 on default port 35687): State: TIMED_WAITING Blocked count: 0 Waited count: 447 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 3 on default port 35687): State: TIMED_WAITING Blocked count: 0 Waited count: 446 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 4 on default port 35687): State: TIMED_WAITING Blocked count: 0 Waited count: 446 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1723970414-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1723970414-154-acceptor-0@3e80f22e-ServerConnector@440e14e3{HTTP/1.1, (http/1.1)}{localhost:38249}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1723970414-155): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1723970414-156): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-60850f53-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@78b7b99b): State: TIMED_WAITING Blocked count: 0 Waited count: 888 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 36029): State: TIMED_WAITING Blocked count: 1 Waited count: 46 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 89 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 1 Waited count: 230 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6d34dc04 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1420643299-172.17.0.2-1734035773582 heartbeating to localhost/127.0.0.1:37615): State: TIMED_WAITING Blocked count: 1267 Waited count: 1458 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@73f930c7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 36029): State: TIMED_WAITING Blocked count: 0 Waited count: 444 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 36029): State: TIMED_WAITING Blocked count: 0 Waited count: 444 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 36029): State: TIMED_WAITING Blocked count: 0 Waited count: 444 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 36029): State: TIMED_WAITING Blocked count: 0 Waited count: 448 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 36029): State: TIMED_WAITING Blocked count: 0 Waited count: 444 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data1)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data4)): State: TIMED_WAITING Blocked count: 7 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data2)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data1/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data3/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data2/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data4/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (java.util.concurrent.ThreadPoolExecutor$Worker@152e21a5[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@5f6ec91f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data6/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data5/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@3c2ad454[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:52655): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 45 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 222 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 13 Waited count: 749 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33724647 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:52655):): State: WAITING Blocked count: 0 Waited count: 850 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3a631fb2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 874 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5201803c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 136 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a3c732d Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 328 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 35 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:52655)): State: RUNNABLE Blocked count: 61 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 
(Time-limited test-EventThread): State: WAITING Blocked count: 6 Waited count: 61 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76de061b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 0 Waited count: 137 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 137 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 137 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 35 Waited count: 91 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3701261f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 137 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 137 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 137 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 2 Waited count: 137 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 
(NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 137 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 136 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 0 Waited count: 137 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 136 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 136 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 136 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 136 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 4 Waited count: 136 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067): State: WAITING Blocked count: 269 Waited count: 1025 Waiting on java.util.concurrent.Semaphore$NonfairSync@5ecd8ec9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 277 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@757c448c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067): State: WAITING Blocked count: 33 Waited count: 217 Waiting on java.util.concurrent.Semaphore$NonfairSync@73947ae6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43067): State: WAITING Blocked count: 42 Waited count: 10347 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d1b9b55 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f12fff9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f12fff9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@68dd8cb3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@384e9804 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@299e6d6b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=43067): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@419bb296 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 289 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 43 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;0a737713f6e3:43067): State: TIMED_WAITING Blocked count: 6 Waited count: 3775 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$938/0x00007fe304eeee50.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 356 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 44 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/0a737713f6e3:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/0a737713f6e3:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (org.apache.hadoop.hdfs.PeerCache@240dc3a0): State: TIMED_WAITING Blocked count: 0 Waited count: 147 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 380 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4366 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 397 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 20 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 36 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 60 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 422 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 44 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 43628 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 431 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 55 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 22 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 454 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1236bdde Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 477 (regionserver/0a737713f6e3:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@163387d9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 
(regionserver/0a737713f6e3:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@169efedc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 478 (regionserver/0a737713f6e3:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3b334a53 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 507 (LeaseRenewer:jenkins.hfs.2@localhost:37615): State: TIMED_WAITING Blocked count: 12 Waited count: 455 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 519 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 4 Waited 
count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (region-location-0): State: WAITING Blocked count: 12 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c4cc8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 562 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 43216 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 573 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 19 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 591 (ForkJoinPool.commonPool-worker-1): State: WAITING Blocked count: 0 Waited count: 364 Waiting on java.util.concurrent.ForkJoinPool@16cb89b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 592 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 301 Waiting on java.util.concurrent.ForkJoinPool@16cb89b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 608 (region-location-1): State: WAITING Blocked count: 6 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c4cc8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 609 (region-location-2): State: WAITING Blocked count: 6 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c4cc8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 610 (region-location-3): State: WAITING Blocked count: 5 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c4cc8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1019 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 464 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 31 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1108 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1123 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 64 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@568240db Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1182 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1183 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1184 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1280 (Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data5): State: TIMED_WAITING Blocked count: 1 Waited count: 81 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1281 (Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data6): State: TIMED_WAITING Blocked count: 0 Waited count: 81 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1309 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 503 Waiting on java.util.concurrent.ForkJoinPool@16cb89b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1540 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 33 Waiting on java.util.TaskQueue@6f227bb Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1712 (ForkJoinPool.commonPool-worker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 486 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1726 (Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data4): State: TIMED_WAITING Blocked count: 1 Waited count: 130 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1727 (Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data3): State: TIMED_WAITING Blocked count: 1 Waited count: 128 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2077 (region-location-4): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c4cc8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4879 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4880 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4881 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8753 (AsyncFSWAL-1-hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/MasterData-prefix:0a737713f6e3,43067,1734035780320): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ec11293 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 8756 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-12-12T20:43:49,302 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-12T20:44:19,302 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;0a737713f6e3:43067
221 active threads
Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495)
Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215)
Thread 3 (Finalizer): State: WAITING Blocked count: 11 Waited count: 12 Waiting on java.lang.ref.ReferenceQueue$Lock@2d4bc92b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172)
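The per-thread records above and below (State, Blocked count, Waited count, Waiting on, Stack) are emitted by the test harness itself: Thread 22 (Time-limited test) further down shows org.apache.hadoop.hbase.util.Threads.printThreadInfo being driven from LocalHBaseCluster.join while HBaseTestingUtility.shutdownMiniCluster waits for the master M:0;0a737713f6e3:43067 to stop, which is why a fresh dump appears every 60 seconds. As an illustration only, a dump with the same fields can be produced with the standard java.lang.management API; the sketch below is a hypothetical standalone example (the class name PeriodicThreadDumper is made up) and is not HBase's own formatter.

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

// Hypothetical helper: prints every live thread in a format similar to the records in this log.
public class PeriodicThreadDumper {
    public static void main(String[] args) throws InterruptedException {
        ThreadMXBean threads = ManagementFactory.getThreadMXBean();
        while (true) {
            // false, false: skip locked-monitor/synchronizer details, keep full stack traces
            for (ThreadInfo info : threads.dumpAllThreads(false, false)) {
                System.out.printf("Thread %d (%s):%n", info.getThreadId(), info.getThreadName());
                System.out.println("  State: " + info.getThreadState());
                System.out.println("  Blocked count: " + info.getBlockedCount());
                System.out.println("  Waited count: " + info.getWaitedCount());
                if (info.getLockName() != null) {
                    System.out.println("  Waiting on " + info.getLockName());
                }
                System.out.println("  Stack:");
                for (StackTraceElement frame : info.getStackTrace()) {
                    System.out.println("    " + frame);
                }
            }
            Thread.sleep(60_000L); // one dump per minute, matching "every 60 seconds" above
        }
    }
}

Read against that mechanism, the WAITING entries parked in LinkedBlockingQueue.take or Semaphore.acquire are idle pool workers and RPC handlers waiting for work, and the RUNNABLE RS-EventLoopGroup / RPCClient-NioEventLoopGroup threads are idling in epollWait or Selector.select. In the previous dump above, the master thread itself (Thread 287, M:0;0a737713f6e3:43067) appears to be what shutdown is waiting on: it is blocked in SyncFuture.get on an AsyncFSWAL sync while flushing and closing a region.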
Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 12 Waited count: 18 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 24 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41222f30 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 3 Waited count: 29 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5101 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 52 Waiting on java.util.concurrent.CountDownLatch$Sync@2a21d011 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11952 Waited count: 12553 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) 
app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 9 Waited count: 10 Waiting on java.lang.ref.ReferenceQueue$Lock@7e9e8767 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@2e956c2b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@15b6454e): State: TIMED_WAITING Blocked count: 0 Waited count: 1014 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 102 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp912985739-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp912985739-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp912985739-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp912985739-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp912985739-41-acceptor-0@5a42dadc-ServerConnector@47697099{HTTP/1.1, (http/1.1)}{localhost:33397}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp912985739-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp912985739-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp912985739-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-c85103e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 21 Waited count: 2918 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@21a66961 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37615): State: TIMED_WAITING Blocked count: 1 Waited count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 102 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5a042424): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 172 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4da42864): State: TIMED_WAITING Blocked count: 0 Waited count: 102 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 171 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 48530 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1295 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79cbbdb9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37615): State: TIMED_WAITING Blocked count: 76 Waited count: 2231 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37615): State: TIMED_WAITING Blocked count: 87 Waited count: 2241 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37615): State: TIMED_WAITING Blocked count: 78 Waited count: 2246 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37615): State: TIMED_WAITING Blocked count: 68 Waited count: 2238 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37615): State: TIMED_WAITING Blocked count: 73 Waited count: 2238 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@29a743c3): State: TIMED_WAITING Blocked count: 0 Waited count: 253 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@6a5e8b16): State: TIMED_WAITING Blocked count: 0 Waited count: 102 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@ca0256c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@55cbb73f): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1765406893)): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp655409990-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp655409990-87-acceptor-0@4c0c4250-ServerConnector@40605cbf{HTTP/1.1, (http/1.1)}{localhost:44175}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp655409990-88): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp655409990-89): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-5c2fc490-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4e070830): State: TIMED_WAITING Blocked count: 0 Waited count: 1009 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 44071): State: TIMED_WAITING Blocked count: 1 Waited count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) 
java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 262 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38a0806 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1420643299-172.17.0.2-1734035773582 heartbeating to localhost/127.0.0.1:37615): State: TIMED_WAITING Blocked count: 1323 Waited count: 1499 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2c6f9029): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 44071): State: TIMED_WAITING Blocked count: 0 Waited count: 509 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 44071): State: TIMED_WAITING Blocked count: 0 Waited count: 506 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 44071): State: TIMED_WAITING Blocked count: 0 Waited count: 505 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 44071): State: TIMED_WAITING Blocked count: 0 Waited count: 505 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 44071): State: TIMED_WAITING Blocked count: 0 Waited count: 510 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 110 (IPC Client (874193583) connection to localhost/127.0.0.1:37615 from jenkins): State: TIMED_WAITING Blocked count: 1391 Waited count: 1392 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 111 (IPC Parameter Sending Thread for localhost/127.0.0.1:37615): State: TIMED_WAITING Blocked count: 0 Waited count: 1892 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp63234285-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp63234285-121-acceptor-0@4696f95-ServerConnector@70160452{HTTP/1.1, (http/1.1)}{localhost:43199}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp63234285-122): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp63234285-123): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-cc0c5e6-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6b027e55): State: TIMED_WAITING Blocked count: 0 Waited count: 1008 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 129 (IPC Server idle connection scanner for port 35687): State: TIMED_WAITING Blocked count: 1 Waited count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 131 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 134 (Command processor): State: WAITING Blocked count: 1 Waited count: 306 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@14a64890 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 135 (BP-1420643299-172.17.0.2-1734035773582 heartbeating to localhost/127.0.0.1:37615): State: TIMED_WAITING Blocked count: 1300 Waited count: 1504 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 
(org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@14340b79): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 127 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 137 (IPC Server handler 0 on default port 35687): State: TIMED_WAITING Blocked count: 0 Waited count: 507 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 138 (IPC Server handler 1 on default port 35687): State: TIMED_WAITING Blocked count: 0 Waited count: 509 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 2 on default port 35687): State: TIMED_WAITING Blocked count: 0 Waited count: 507 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 3 on default port 35687): State: TIMED_WAITING Blocked count: 0 Waited count: 506 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 4 on default port 35687): State: TIMED_WAITING Blocked count: 0 Waited count: 506 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1723970414-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1723970414-154-acceptor-0@3e80f22e-ServerConnector@440e14e3{HTTP/1.1, (http/1.1)}{localhost:38249}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1723970414-155): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1723970414-156): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-60850f53-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@78b7b99b): State: TIMED_WAITING Blocked count: 0 Waited count: 1008 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 36029): State: TIMED_WAITING Blocked count: 1 Waited count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 1 Waited count: 250 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6d34dc04 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1420643299-172.17.0.2-1734035773582 heartbeating to localhost/127.0.0.1:37615): State: TIMED_WAITING Blocked count: 1287 Waited count: 1498 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@73f930c7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 36029): State: TIMED_WAITING Blocked count: 0 Waited count: 504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 36029): State: TIMED_WAITING Blocked count: 0 Waited count: 504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 36029): State: TIMED_WAITING Blocked count: 0 Waited count: 504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 36029): State: TIMED_WAITING Blocked count: 0 Waited count: 508 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 36029): State: TIMED_WAITING Blocked count: 0 Waited count: 504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data1)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data4)): State: TIMED_WAITING Blocked count: 7 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data2)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data1/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data3/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data2/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data4/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (java.util.concurrent.ThreadPoolExecutor$Worker@152e21a5[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 
(java.util.concurrent.ThreadPoolExecutor$Worker@5f6ec91f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data6/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data5/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@3c2ad454[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:52655): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 51 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 252 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 13 Waited count: 754 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33724647 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:52655):): State: WAITING Blocked count: 0 Waited count: 855 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3a631fb2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 879 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5201803c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 137 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a3c732d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 369 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 35 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:52655)): State: RUNNABLE Blocked count: 61 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 6 Waited count: 61 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76de061b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 0 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 
(NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 137 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 35 Waited count: 91 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3701261f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 137 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 137 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 2 Waited count: 137 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 137 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 137 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 0 Waited count: 137 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 137 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 137 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 137 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 137 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 4 Waited count: 137 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067): State: WAITING Blocked count: 269 Waited count: 1025 Waiting on java.util.concurrent.Semaphore$NonfairSync@5ecd8ec9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@757c448c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067): State: WAITING Blocked count: 33 Waited count: 217 Waiting on java.util.concurrent.Semaphore$NonfairSync@73947ae6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43067): State: WAITING Blocked count: 42 Waited count: 10347 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d1b9b55 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f12fff9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f12fff9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@68dd8cb3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@384e9804 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@299e6d6b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=43067): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@419bb296 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 289 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 311 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 43 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;0a737713f6e3:43067): State: TIMED_WAITING Blocked count: 6 Waited count: 3775 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$938/0x00007fe304eeee50.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) 
app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 356 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 50 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/0a737713f6e3:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/0a737713f6e3:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (org.apache.hadoop.hdfs.PeerCache@240dc3a0): State: TIMED_WAITING Blocked count: 0 Waited count: 167 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 380 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4966 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 397 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 20 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 36 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 73 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4fd9fe95 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 422 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 50 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 49630 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 431 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 55 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 22 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 454 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1236bdde Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 477 (regionserver/0a737713f6e3:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@163387d9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/0a737713f6e3:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@169efedc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 478 (regionserver/0a737713f6e3:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3b334a53 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 519 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (region-location-0): State: WAITING Blocked count: 12 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c4cc8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 562 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 49219 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 573 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 19 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 591 (ForkJoinPool.commonPool-worker-1): State: WAITING Blocked count: 0 Waited count: 364 Waiting on java.util.concurrent.ForkJoinPool@16cb89b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 592 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 301 Waiting on java.util.concurrent.ForkJoinPool@16cb89b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 608 (region-location-1): State: WAITING Blocked count: 6 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c4cc8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 609 (region-location-2): State: WAITING Blocked count: 6 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c4cc8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 610 (region-location-3): State: WAITING Blocked count: 5 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c4cc8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1019 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 470 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 31 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1108 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1123 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 64 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@568240db Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1182 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1183 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1184 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1309 (ForkJoinPool.commonPool-worker-3): State: TIMED_WAITING Blocked count: 0 Waited count: 504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1540 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 33 Waiting on java.util.TaskQueue@6f227bb Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2077 (region-location-4): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c4cc8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4879 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4880 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4881 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8753 (AsyncFSWAL-1-hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/MasterData-prefix:0a737713f6e3,43067,1734035780320): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ec11293 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8756 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-12T20:44:49,302 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4, see HBASE-27595 for details. 2024-12-12T20:45:19,302 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4, see HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;0a737713f6e3:43067 221 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 11 Waited count: 12 Waiting on java.lang.ref.ReferenceQueue$Lock@2d4bc92b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 12 Waited count: 19 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 27 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 3 Waited count: 32 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41222f30 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5701 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 58 Waiting on java.util.concurrent.CountDownLatch$Sync@de70fc8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11952 Waited count: 12554 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) 
java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 9 Waited count: 10 Waiting on java.lang.ref.ReferenceQueue$Lock@7e9e8767 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@2e956c2b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@15b6454e): State: TIMED_WAITING Blocked count: 0 Waited count: 1134 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 114 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp912985739-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp912985739-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp912985739-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp912985739-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp912985739-41-acceptor-0@5a42dadc-ServerConnector@47697099{HTTP/1.1, (http/1.1)}{localhost:33397}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp912985739-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp912985739-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp912985739-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-c85103e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 21 Waited count: 2918 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@21a66961 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37615): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 114 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5a042424): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 192 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4da42864): State: TIMED_WAITING Blocked count: 0 Waited count: 114 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 191 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 54485 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1295 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79cbbdb9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37615): State: TIMED_WAITING Blocked count: 76 Waited count: 2291 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37615): State: TIMED_WAITING Blocked count: 87 Waited count: 2302 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37615): State: TIMED_WAITING Blocked count: 78 Waited count: 2307 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37615): State: TIMED_WAITING Blocked count: 68 Waited count: 2298 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37615): State: TIMED_WAITING Blocked count: 73 Waited count: 2300 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): 
State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@29a743c3): State: TIMED_WAITING Blocked count: 0 Waited count: 283 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@6a5e8b16): State: TIMED_WAITING Blocked count: 0 Waited count: 114 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@ca0256c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@55cbb73f): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1765406893)): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp655409990-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp655409990-87-acceptor-0@4c0c4250-ServerConnector@40605cbf{HTTP/1.1, (http/1.1)}{localhost:44175}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp655409990-88): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp655409990-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-5c2fc490-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4e070830): State: TIMED_WAITING Blocked count: 0 Waited count: 1129 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 44071): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 282 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38a0806 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1420643299-172.17.0.2-1734035773582 heartbeating to localhost/127.0.0.1:37615): State: TIMED_WAITING Blocked count: 1343 Waited count: 1539 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2c6f9029): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 44071): State: TIMED_WAITING Blocked count: 0 Waited count: 569 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 44071): State: TIMED_WAITING Blocked count: 0 Waited count: 566 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 44071): State: TIMED_WAITING Blocked count: 0 Waited count: 565 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 44071): State: TIMED_WAITING Blocked count: 0 Waited count: 565 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 44071): State: TIMED_WAITING Blocked count: 0 Waited count: 570 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 110 (IPC Client (874193583) connection to localhost/127.0.0.1:37615 from jenkins): State: TIMED_WAITING Blocked count: 1449 Waited count: 1450 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 111 (IPC Parameter Sending Thread for localhost/127.0.0.1:37615): State: TIMED_WAITING Blocked count: 0 Waited count: 1952 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp63234285-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp63234285-121-acceptor-0@4696f95-ServerConnector@70160452{HTTP/1.1, (http/1.1)}{localhost:43199}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp63234285-122): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp63234285-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-cc0c5e6-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6b027e55): State: TIMED_WAITING Blocked count: 0 Waited count: 1128 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 129 (IPC Server idle connection scanner for port 35687): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 131 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 134 (Command processor): State: WAITING Blocked count: 1 Waited count: 326 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@14a64890 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 135 (BP-1420643299-172.17.0.2-1734035773582 heartbeating to localhost/127.0.0.1:37615): State: TIMED_WAITING Blocked count: 1320 Waited count: 1544 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@14340b79): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 127 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 137 (IPC Server handler 0 on default port 35687): State: TIMED_WAITING Blocked count: 0 Waited count: 567 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 138 (IPC Server handler 1 on default port 35687): State: TIMED_WAITING Blocked count: 0 Waited count: 569 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 2 on default port 35687): State: TIMED_WAITING Blocked count: 0 Waited count: 567 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 3 on default port 35687): State: TIMED_WAITING Blocked count: 0 Waited count: 566 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 4 on default port 35687): State: TIMED_WAITING Blocked count: 0 Waited count: 566 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1723970414-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1723970414-154-acceptor-0@3e80f22e-ServerConnector@440e14e3{HTTP/1.1, (http/1.1)}{localhost:38249}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1723970414-155): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1723970414-156): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-60850f53-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@78b7b99b): State: TIMED_WAITING Blocked count: 0 Waited count: 1128 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 36029): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 1 Waited count: 270 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6d34dc04 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1420643299-172.17.0.2-1734035773582 heartbeating to localhost/127.0.0.1:37615): State: TIMED_WAITING Blocked count: 1307 Waited count: 1538 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@73f930c7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 36029): State: TIMED_WAITING Blocked count: 0 Waited count: 564 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 36029): State: TIMED_WAITING Blocked count: 0 Waited count: 564 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 36029): State: TIMED_WAITING Blocked count: 0 Waited count: 564 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 36029): State: TIMED_WAITING Blocked count: 0 Waited count: 568 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 36029): State: TIMED_WAITING Blocked count: 0 Waited count: 564 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data1)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data4)): State: TIMED_WAITING Blocked count: 7 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data2)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data1/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data3/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data2/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data4/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (java.util.concurrent.ThreadPoolExecutor$Worker@152e21a5[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@5f6ec91f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data6/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data5/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@3c2ad454[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:52655): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 57 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 282 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 13 Waited count: 758 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33724647 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:52655):): State: WAITING Blocked count: 0 Waited count: 859 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3a631fb2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 883 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5201803c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 137 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a3c732d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 415 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 35 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:52655)): State: RUNNABLE Blocked count: 61 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 6 Waited count: 61 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76de061b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 0 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 
(NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 35 Waited count: 91 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3701261f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 2 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 0 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 137 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 137 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 137 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 137 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 4 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067): State: WAITING Blocked count: 269 Waited count: 1025 Waiting on java.util.concurrent.Semaphore$NonfairSync@5ecd8ec9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@757c448c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067): State: WAITING Blocked count: 33 Waited count: 217 Waiting on java.util.concurrent.Semaphore$NonfairSync@73947ae6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43067): State: WAITING Blocked count: 42 Waited count: 10347 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d1b9b55 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f12fff9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f12fff9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@68dd8cb3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@384e9804 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@299e6d6b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=43067): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@419bb296 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 289 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 311 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 43 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;0a737713f6e3:43067): State: TIMED_WAITING Blocked count: 6 Waited count: 3775 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$938/0x00007fe304eeee50.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) 
app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 356 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 56 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/0a737713f6e3:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/0a737713f6e3:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (org.apache.hadoop.hdfs.PeerCache@240dc3a0): State: TIMED_WAITING Blocked count: 0 Waited count: 187 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 380 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5566 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 397 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 20 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 36 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 73 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4fd9fe95 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 422 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 55632 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 431 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 55 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 22 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 454 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1236bdde Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 477 (regionserver/0a737713f6e3:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@163387d9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/0a737713f6e3:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@169efedc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 478 (regionserver/0a737713f6e3:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3b334a53 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 519 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (region-location-0): State: WAITING Blocked count: 12 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c4cc8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 562 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 55221 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 573 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 19 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 591 (ForkJoinPool.commonPool-worker-1): State: TIMED_WAITING Blocked count: 0 Waited count: 365 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 592 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 301 Waiting on java.util.concurrent.ForkJoinPool@16cb89b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 608 (region-location-1): State: WAITING Blocked count: 6 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c4cc8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 609 (region-location-2): State: WAITING Blocked count: 6 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c4cc8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 610 (region-location-3): State: WAITING Blocked count: 5 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c4cc8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1019 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited 
count: 476 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 31 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1108 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1123 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 64 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@568240db Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1182 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1183 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1184 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1540 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 33 Waiting on java.util.TaskQueue@6f227bb Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2077 (region-location-4): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c4cc8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4879 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4880 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4881 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8753 (AsyncFSWAL-1-hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/MasterData-prefix:0a737713f6e3,43067,1734035780320): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ec11293 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8756 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 17 
Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 8757 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-12-12T20:45:49,303 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T20:46:19,303 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T20:46:22,680 DEBUG [master/0a737713f6e3:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=23, reuseRatio=69.70% 2024-12-12T20:46:22,681 DEBUG [master/0a737713f6e3:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-12T20:46:32,721 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;0a737713f6e3:43067 220 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) 
app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 11 Waited count: 12 Waiting on java.lang.ref.ReferenceQueue$Lock@2d4bc92b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 12 Waited count: 20 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 30 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 3 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41222f30 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6300 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 64 Waiting on java.util.concurrent.CountDownLatch$Sync@5b856a4f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11952 Waited count: 12555 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) 
app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 9 Waited count: 10 Waiting on java.lang.ref.ReferenceQueue$Lock@7e9e8767 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@2e956c2b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@15b6454e): State: TIMED_WAITING Blocked count: 0 Waited count: 1254 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 126 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp912985739-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp912985739-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp912985739-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp912985739-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp912985739-41-acceptor-0@5a42dadc-ServerConnector@47697099{HTTP/1.1, (http/1.1)}{localhost:33397}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp912985739-42): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp912985739-43): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp912985739-44): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-c85103e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 21 Waited count: 2918 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@21a66961 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37615): State: TIMED_WAITING Blocked count: 1 Waited count: 64 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 126 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5a042424): State: TIMED_WAITING 
Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 212 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4da42864): State: TIMED_WAITING Blocked count: 0 Waited count: 126 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 211 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 60437 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1295 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79cbbdb9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) 
app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37615): State: TIMED_WAITING Blocked count: 76 Waited count: 2352 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37615): State: TIMED_WAITING Blocked count: 87 Waited count: 2363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37615): State: TIMED_WAITING Blocked count: 78 Waited count: 2368 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37615): State: TIMED_WAITING Blocked count: 68 Waited count: 2359 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37615): State: TIMED_WAITING Blocked count: 73 Waited count: 2361 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@29a743c3): State: TIMED_WAITING Blocked count: 0 Waited count: 313 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@6a5e8b16): State: TIMED_WAITING Blocked count: 0 Waited count: 126 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@ca0256c): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@55cbb73f): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1765406893)): State: TIMED_WAITING Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp655409990-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp655409990-87-acceptor-0@4c0c4250-ServerConnector@40605cbf{HTTP/1.1, (http/1.1)}{localhost:44175}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp655409990-88): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp655409990-89): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-5c2fc490-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4e070830): State: TIMED_WAITING Blocked count: 0 Waited count: 1249 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 44071): State: TIMED_WAITING Blocked count: 1 Waited count: 64 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 125 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 302 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38a0806 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1420643299-172.17.0.2-1734035773582 heartbeating to localhost/127.0.0.1:37615): State: TIMED_WAITING Blocked count: 1363 Waited count: 1579 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2c6f9029): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 44071): State: TIMED_WAITING Blocked count: 0 Waited count: 629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 44071): State: TIMED_WAITING Blocked count: 0 Waited count: 626 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 44071): State: TIMED_WAITING Blocked count: 0 Waited count: 625 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 44071): State: TIMED_WAITING Blocked count: 0 Waited count: 625 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 44071): State: TIMED_WAITING Blocked count: 0 Waited count: 630 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 110 (IPC Client (874193583) connection to localhost/127.0.0.1:37615 from jenkins): State: TIMED_WAITING Blocked count: 1509 Waited count: 1510 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 111 (IPC Parameter Sending Thread for localhost/127.0.0.1:37615): State: TIMED_WAITING Blocked count: 0 Waited count: 2012 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp63234285-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp63234285-121-acceptor-0@4696f95-ServerConnector@70160452{HTTP/1.1, (http/1.1)}{localhost:43199}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp63234285-122): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp63234285-123): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-cc0c5e6-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6b027e55): State: TIMED_WAITING Blocked count: 0 Waited count: 1248 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 129 (IPC Server idle connection scanner for port 35687): State: TIMED_WAITING Blocked count: 1 Waited count: 64 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 131 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 125 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 134 (Command processor): State: WAITING Blocked count: 1 Waited count: 346 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@14a64890 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 135 (BP-1420643299-172.17.0.2-1734035773582 heartbeating to localhost/127.0.0.1:37615): State: TIMED_WAITING Blocked count: 1340 Waited count: 1584 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@14340b79): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 127 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 137 (IPC Server handler 0 on default port 35687): State: TIMED_WAITING Blocked count: 0 Waited count: 627 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 138 (IPC Server handler 1 on default port 35687): State: TIMED_WAITING Blocked count: 0 Waited count: 629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 2 on default port 35687): State: TIMED_WAITING Blocked count: 0 Waited count: 627 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 3 on default port 35687): State: TIMED_WAITING Blocked count: 0 Waited count: 626 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 4 on default port 35687): State: TIMED_WAITING Blocked count: 0 Waited count: 626 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1723970414-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) 
app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1723970414-154-acceptor-0@3e80f22e-ServerConnector@440e14e3{HTTP/1.1, (http/1.1)}{localhost:38249}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1723970414-155): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1723970414-156): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-60850f53-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@78b7b99b): State: TIMED_WAITING Blocked count: 0 Waited count: 1248 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 36029): State: TIMED_WAITING Blocked count: 1 Waited count: 64 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 125 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 1 Waited count: 290 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6d34dc04 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1420643299-172.17.0.2-1734035773582 heartbeating to localhost/127.0.0.1:37615): State: TIMED_WAITING Blocked count: 1327 Waited count: 1578 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@73f930c7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) 
app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 36029): State: TIMED_WAITING Blocked count: 0 Waited count: 624 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 36029): State: TIMED_WAITING Blocked count: 0 Waited count: 624 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 36029): State: TIMED_WAITING Blocked count: 0 Waited count: 624 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 36029): State: TIMED_WAITING Blocked count: 0 Waited count: 628 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 36029): State: TIMED_WAITING Blocked count: 0 Waited count: 624 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data1)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data4)): State: TIMED_WAITING Blocked count: 7 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data2)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data1/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data3/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data2/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data4/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6d4b38a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@592fc1c6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (java.util.concurrent.ThreadPoolExecutor$Worker@152e21a5[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@5f6ec91f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 224 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data6/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data5/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@28981250 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@3c2ad454[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 
Waited count: 21 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:52655): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 63 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 312 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 13 Waited count: 762 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33724647 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:52655):): State: WAITING 
Blocked count: 0 Waited count: 863 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3a631fb2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 887 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5201803c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a3c732d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 456 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 35 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:52655)): State: RUNNABLE Blocked count: 61 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 6 Waited count: 61 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76de061b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 0 Waited count: 139 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 139 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 35 Waited count: 91 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3701261f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 139 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 2 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 138 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 0 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 
(NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 4 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067): State: WAITING Blocked count: 269 Waited count: 1025 Waiting on java.util.concurrent.Semaphore$NonfairSync@5ecd8ec9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on 
java.util.concurrent.Semaphore$NonfairSync@757c448c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067): State: WAITING Blocked count: 33 Waited count: 217 Waiting on java.util.concurrent.Semaphore$NonfairSync@73947ae6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43067): State: WAITING Blocked count: 42 Waited count: 10347 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d1b9b55 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f12fff9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f12fff9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@68dd8cb3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@384e9804 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@299e6d6b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=43067): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@419bb296 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 289 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 43 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;0a737713f6e3:43067): State: TIMED_WAITING Blocked count: 6 Waited count: 3775 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$938/0x00007fe304eeee50.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 356 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 62 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/0a737713f6e3:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/0a737713f6e3:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (org.apache.hadoop.hdfs.PeerCache@240dc3a0): State: TIMED_WAITING Blocked count: 0 Waited count: 207 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 380 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6165 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 397 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 20 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 36 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 73 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4fd9fe95 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 422 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 62 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 61633 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 431 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 55 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 22 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 454 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1236bdde Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 477 (regionserver/0a737713f6e3:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@163387d9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/0a737713f6e3:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@169efedc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 478 (regionserver/0a737713f6e3:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3b334a53 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 519 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (region-location-0): State: WAITING Blocked count: 12 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c4cc8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 562 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 61223 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 573 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 19 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 592 (ForkJoinPool.commonPool-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 302 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 608 (region-location-1): State: WAITING Blocked count: 6 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c4cc8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 609 (region-location-2): State: WAITING Blocked count: 6 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c4cc8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 
610 (region-location-3): State: WAITING Blocked count: 5 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c4cc8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1019 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 482 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 31 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1108 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 
Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1123 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 64 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@568240db Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1182 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1183 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1184 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1540 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 33 Waiting on java.util.TaskQueue@6f227bb Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2077 (region-location-4): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c4cc8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4879 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4880 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4881 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8753 
(AsyncFSWAL-1-hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/MasterData-prefix:0a737713f6e3,43067,1734035780320):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ec11293
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 8757 (process reaper):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 3
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 8761 (Timer for 'JobHistoryServer' metrics system):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-12-12T20:46:49,303 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-12T20:47:19,303 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-12T20:47:41,995 DEBUG [M:0;0a737713f6e3:43067 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-12T20:47:41,995 WARN [M:0;0a737713f6e3:43067 {}] region.MasterRegion(134): Failed to close region
org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3796, WAL system stuck?
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:883) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1758) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1285) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:603) ~[classes/:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3796, WAL system stuck?
    at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) ~[classes/:?]
    ... 20 more
2024-12-12T20:47:41,999 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(163): normal close failed, try recover
java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:396) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:243) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:201) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:236) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:160) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T20:47:42,002 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils
2024-12-12T20:47:42,002 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease()
2024-12-12T20:47:42,002 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/MasterData/WALs/0a737713f6e3,43067,1734035780320/0a737713f6e3%2C43067%2C1734035780320.1734035783556
2024-12-12T20:47:42,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/MasterData/WALs/0a737713f6e3,43067,1734035780320/0a737713f6e3%2C43067%2C1734035780320.1734035783556 after 1ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:610) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:164) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T20:47:42,003 WARN [Close-WAL-Writer-0 {}] wal.AsyncFSWAL(734): close old writer failed.
java.io.InterruptedIOException: Operation cancelled
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:610) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:164) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T20:47:42,003 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/MasterData/WALs/0a737713f6e3,43067,1734035780320/0a737713f6e3%2C43067%2C1734035780320.1734035783556
2024-12-12T20:47:42,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/MasterData/WALs/0a737713f6e3,43067,1734035780320/0a737713f6e3%2C43067%2C1734035780320.1734035783556 after 1ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;0a737713f6e3:43067 221 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 11 Waited count: 12 Waiting on java.lang.ref.ReferenceQueue$Lock@2d4bc92b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 12 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 33 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 3 Waited count: 38 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41222f30 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6900 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 70 Waiting on java.util.concurrent.CountDownLatch$Sync@4a2c4111 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11952 Waited count: 12556 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 9 Waited count: 10 Waiting on java.lang.ref.ReferenceQueue$Lock@7e9e8767 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@2e956c2b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@15b6454e): State: TIMED_WAITING Blocked count: 0 Waited count: 1374 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 138 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp912985739-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp912985739-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp912985739-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp912985739-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp912985739-41-acceptor-0@5a42dadc-ServerConnector@47697099{HTTP/1.1, (http/1.1)}{localhost:33397}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp912985739-42): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp912985739-43): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp912985739-44): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-c85103e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 21 Waited count: 2918 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@21a66961 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37615): State: TIMED_WAITING Blocked count: 1 Waited 
count: 70 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 138 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5a042424): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 232 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4da42864): State: TIMED_WAITING Blocked count: 0 Waited count: 138 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 231 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 66393 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1295 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79cbbdb9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37615): State: TIMED_WAITING Blocked count: 76 Waited count: 2413 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37615): State: TIMED_WAITING Blocked count: 87 Waited count: 2424 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37615): State: TIMED_WAITING Blocked count: 78 Waited count: 2428 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37615): State: TIMED_WAITING Blocked count: 68 Waited count: 2420 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37615): State: TIMED_WAITING Blocked count: 74 Waited count: 2422 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@29a743c3): State: TIMED_WAITING Blocked count: 0 Waited count: 343 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@6a5e8b16): State: TIMED_WAITING Blocked count: 0 Waited count: 138 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@ca0256c): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@55cbb73f): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1765406893)): State: TIMED_WAITING Blocked count: 0 Waited count: 24 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp655409990-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp655409990-87-acceptor-0@4c0c4250-ServerConnector@40605cbf{HTTP/1.1, (http/1.1)}{localhost:44175}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp655409990-88): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp655409990-89): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-5c2fc490-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4e070830): State: TIMED_WAITING Blocked count: 0 Waited count: 1369 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 44071): State: TIMED_WAITING Blocked count: 1 Waited count: 70 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 137 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 322 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38a0806 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1420643299-172.17.0.2-1734035773582 heartbeating to localhost/127.0.0.1:37615): State: TIMED_WAITING Blocked count: 1383 Waited count: 1619 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2c6f9029): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 44071): State: TIMED_WAITING Blocked count: 0 Waited count: 689 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 44071): State: TIMED_WAITING Blocked count: 0 Waited count: 686 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 44071): State: TIMED_WAITING Blocked count: 0 Waited count: 685 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 44071): State: TIMED_WAITING Blocked count: 0 Waited count: 685 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 44071): State: TIMED_WAITING Blocked count: 0 Waited count: 690 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 110 (IPC Client (874193583) connection to localhost/127.0.0.1:37615 from jenkins): State: TIMED_WAITING Blocked count: 1565 Waited count: 1566 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 111 (IPC Parameter Sending Thread for localhost/127.0.0.1:37615): State: TIMED_WAITING Blocked count: 0 Waited count: 2072 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp63234285-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp63234285-121-acceptor-0@4696f95-ServerConnector@70160452{HTTP/1.1, (http/1.1)}{localhost:43199}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp63234285-122): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp63234285-123): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-cc0c5e6-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6b027e55): State: TIMED_WAITING Blocked count: 0 Waited count: 1368 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 129 (IPC Server idle connection scanner for port 35687): State: TIMED_WAITING Blocked count: 1 Waited count: 70 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 131 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 137 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 134 (Command processor): State: WAITING Blocked count: 1 Waited count: 366 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@14a64890 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 135 (BP-1420643299-172.17.0.2-1734035773582 heartbeating to localhost/127.0.0.1:37615): State: TIMED_WAITING Blocked count: 1360 Waited count: 1624 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@14340b79): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 127 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 137 (IPC Server handler 0 on default port 35687): State: TIMED_WAITING Blocked count: 0 Waited count: 687 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 138 (IPC Server handler 1 on default port 35687): State: TIMED_WAITING Blocked count: 0 Waited count: 689 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 2 on default port 35687): State: TIMED_WAITING Blocked count: 0 Waited count: 687 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 3 on default port 35687): State: TIMED_WAITING Blocked count: 0 Waited count: 686 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 4 on default port 35687): State: TIMED_WAITING Blocked count: 0 Waited count: 686 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1723970414-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe304428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1723970414-154-acceptor-0@3e80f22e-ServerConnector@440e14e3{HTTP/1.1, (http/1.1)}{localhost:38249}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1723970414-155): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1723970414-156): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-60850f53-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@78b7b99b): State: TIMED_WAITING Blocked count: 0 Waited count: 1368 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 36029): State: TIMED_WAITING Blocked count: 1 Waited count: 70 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 137 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 1 Waited count: 310 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6d34dc04 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1420643299-172.17.0.2-1734035773582 heartbeating to localhost/127.0.0.1:37615): State: TIMED_WAITING Blocked count: 1347 Waited count: 1618 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@73f930c7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 36029): State: TIMED_WAITING Blocked count: 0 Waited count: 684 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 36029): State: TIMED_WAITING Blocked count: 0 Waited count: 684 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 36029): State: TIMED_WAITING Blocked count: 0 Waited count: 684 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 36029): State: TIMED_WAITING Blocked count: 0 Waited count: 688 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 36029): State: TIMED_WAITING Blocked count: 0 Waited count: 684 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data1)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data4)): State: TIMED_WAITING Blocked count: 7 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data2)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data1/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data3/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data2/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data4/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6d4b38a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@592fc1c6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (java.util.concurrent.ThreadPoolExecutor$Worker@152e21a5[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@5f6ec91f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data6/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data5/current/BP-1420643299-172.17.0.2-1734035773582): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@28981250 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@3c2ad454[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:52655): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 69 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 342 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 13 Waited count: 767 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33724647 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:52655):): State: WAITING Blocked count: 0 Waited count: 868 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3a631fb2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 892 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5201803c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 139 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a3c732d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 494 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 35 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:52655)): State: RUNNABLE Blocked count: 61 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 6 Waited count: 61 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76de061b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 0 Waited count: 139 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 139 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 139 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 35 Waited count: 91 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3701261f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 139 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 139 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 139 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 
(NIOWorkerThread-8): State: WAITING Blocked count: 2 Waited count: 139 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 139 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 139 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 0 Waited count: 139 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 139 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 138 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 4 Waited count: 139 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b5d587f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43067): State: WAITING Blocked count: 269 Waited count: 1025 Waiting on java.util.concurrent.Semaphore$NonfairSync@5ecd8ec9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@757c448c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43067): State: WAITING Blocked count: 33 Waited count: 217 Waiting on java.util.concurrent.Semaphore$NonfairSync@73947ae6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43067): State: WAITING Blocked count: 42 Waited count: 10347 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d1b9b55 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f12fff9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f12fff9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@68dd8cb3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@384e9804 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43067): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@299e6d6b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=43067): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@419bb296 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 289 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 43 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;0a737713f6e3:43067): State: TIMED_WAITING Blocked count: 6 Waited count: 3776 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1011) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown(AbstractFSWALProvider.java:184) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:272) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1758) app//org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1285) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:603) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/0a737713f6e3:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/0a737713f6e3:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (org.apache.hadoop.hdfs.PeerCache@240dc3a0): State: TIMED_WAITING Blocked count: 0 Waited count: 227 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 380 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6765 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 397 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 20 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 36 Waited count: 4 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 73 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4fd9fe95 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 422 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 67635 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 431 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 55 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 22 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 454 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1236bdde Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 477 (regionserver/0a737713f6e3:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@163387d9 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/0a737713f6e3:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@169efedc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 478 (regionserver/0a737713f6e3:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3b334a53 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 519 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (region-location-0): State: WAITING Blocked count: 12 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c4cc8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 562 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 67225 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 573 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 19 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 608 (region-location-1): State: WAITING Blocked count: 6 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c4cc8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 609 (region-location-2): State: WAITING Blocked count: 6 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c4cc8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 610 (region-location-3): State: WAITING Blocked count: 5 Waited count: 12 
Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c4cc8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1019 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 488 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 31 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1108 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1123 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 64 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@568240db Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1182 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1183 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1184 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1540 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 33 Waiting on java.util.TaskQueue@6f227bb Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2077 (region-location-4): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c4cc8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4879 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4880 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4881 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8753 
(AsyncFSWAL-1-hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/MasterData-prefix:0a737713f6e3,43067,1734035780320): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ec11293 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8757 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8761 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 8765 (WAL-Shutdown-0): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doShutdown(AsyncFSWAL.java:793) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:995) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:990) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8766 (Close-WAL-Writer-0): State: TIMED_WAITING 
Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL$$Lambda$1116/0x00007fe305130000.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-12-12T20:47:46,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/MasterData/WALs/0a737713f6e3,43067,1734035780320/0a737713f6e3%2C43067%2C1734035780320.1734035783556 after 4001ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T20:47:46,999 ERROR [WAL-Shutdown-0 {}] wal.AsyncFSWAL(794): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds" 2024-12-12T20:47:46,999 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
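
The WAL-Shutdown-0 error above points at a tunable: the async writer close is only waited on for a few seconds before the warning fires, and the message itself names "hbase.wal.async.wait.on.shutdown.seconds" as the knob to raise. A minimal sketch of setting it in test setup follows; only the config key comes from the log message, while the helper class name and the 30-second value are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hypothetical helper: builds a Configuration with a longer WAL shutdown wait.
// Only the config key is taken from the ERROR record above; the 30-second value
// is an illustrative choice, not a recommendation derived from this run.
public class WalShutdownWaitSketch {
  public static Configuration withLongerWalShutdownWait() {
    Configuration conf = HBaseConfiguration.create();
    // The ERROR above reports a 5-second wait before it gives up on the async writer close.
    conf.setInt("hbase.wal.async.wait.on.shutdown.seconds", 30);
    return conf;
  }
}
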
2024-12-12T20:47:46,999 INFO [M:0;0a737713f6e3:43067 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-12T20:47:46,999 INFO [M:0;0a737713f6e3:43067 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:43067 2024-12-12T20:47:47,006 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37615/user/jenkins/test-data/943a9ef3-35b3-a21e-04d9-f7d9acbe6427/MasterData/WALs/0a737713f6e3,43067,1734035780320/0a737713f6e3%2C43067%2C1734035780320.1734035783556 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 12 more 2024-12-12T20:47:47,082 DEBUG [M:0;0a737713f6e3:43067 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/0a737713f6e3,43067,1734035780320 already deleted, retry=false 2024-12-12T20:47:47,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T20:47:47,191 INFO [M:0;0a737713f6e3:43067 {}] regionserver.HRegionServer(1307): Exiting; stopping=0a737713f6e3,43067,1734035780320; zookeeper connection closed. 
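
Both lease-recovery failures above bottom out in the same root cause: by the time Close-WAL-Writer-0 retries, the DistributedFileSystem handle has already been closed as part of shutdown, so every DFSClient call fails its open-check with "Filesystem closed". A minimal, self-contained sketch of that failure mode is shown below; the URI and path are illustrative placeholders (the real ones appear in the records above), and the class name is an assumption.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch of the "java.io.IOException: Filesystem closed" failure mode seen above:
// any DistributedFileSystem operation issued after close() fails the client's
// checkOpen before an RPC is even attempted.
public class ClosedFilesystemSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem fs =
        (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:37615"), conf);
    fs.close();
    // Throws java.io.IOException: Filesystem closed, matching the WARN records above.
    fs.recoverLease(new Path("/user/jenkins/some-wal-file"));
  }
}
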
2024-12-12T20:47:47,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43067-0x1001bf54bb60000, quorum=127.0.0.1:52655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T20:47:47,232 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@48db9f4a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-12T20:47:47,232 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@440e14e3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-12T20:47:47,233 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-12T20:47:47,233 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78feedc7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-12T20:47:47,233 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69723e75{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop.log.dir/,STOPPED} 2024-12-12T20:47:47,235 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-12T20:47:47,235 WARN [BP-1420643299-172.17.0.2-1734035773582 heartbeating to localhost/127.0.0.1:37615 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-12T20:47:47,235 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-12T20:47:47,235 WARN [BP-1420643299-172.17.0.2-1734035773582 heartbeating to localhost/127.0.0.1:37615 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1420643299-172.17.0.2-1734035773582 (Datanode Uuid ae32363f-ed8f-43e2-b299-eff2c4d0d5e1) service to localhost/127.0.0.1:37615 2024-12-12T20:47:47,237 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data5/current/BP-1420643299-172.17.0.2-1734035773582 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-12T20:47:47,237 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data6/current/BP-1420643299-172.17.0.2-1734035773582 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-12T20:47:47,238 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-12T20:47:47,248 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3b749234{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 
2024-12-12T20:47:47,249 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@70160452{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-12T20:47:47,249 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-12T20:47:47,250 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2536e78d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-12T20:47:47,250 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c62626f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop.log.dir/,STOPPED} 2024-12-12T20:47:47,256 WARN [BP-1420643299-172.17.0.2-1734035773582 heartbeating to localhost/127.0.0.1:37615 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-12T20:47:47,256 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-12T20:47:47,256 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-12T20:47:47,256 WARN [BP-1420643299-172.17.0.2-1734035773582 heartbeating to localhost/127.0.0.1:37615 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1420643299-172.17.0.2-1734035773582 (Datanode Uuid f388b13f-08be-4585-a5ba-acb0b5f227dc) service to localhost/127.0.0.1:37615 2024-12-12T20:47:47,256 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data3/current/BP-1420643299-172.17.0.2-1734035773582 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-12T20:47:47,257 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data4/current/BP-1420643299-172.17.0.2-1734035773582 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-12T20:47:47,257 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-12T20:47:47,260 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7725692e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-12T20:47:47,261 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@40605cbf{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-12T20:47:47,261 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-12T20:47:47,261 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@1f65c51d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-12T20:47:47,261 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77fcc65c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop.log.dir/,STOPPED} 2024-12-12T20:47:47,264 WARN [BP-1420643299-172.17.0.2-1734035773582 heartbeating to localhost/127.0.0.1:37615 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-12T20:47:47,264 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-12T20:47:47,264 WARN [BP-1420643299-172.17.0.2-1734035773582 heartbeating to localhost/127.0.0.1:37615 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1420643299-172.17.0.2-1734035773582 (Datanode Uuid 1c167605-940f-454c-99b2-09758889a2c6) service to localhost/127.0.0.1:37615 2024-12-12T20:47:47,264 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-12T20:47:47,264 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data1/current/BP-1420643299-172.17.0.2-1734035773582 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-12T20:47:47,265 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/cluster_f7200db0-dcdc-c952-1a9c-84f48e6c6b47/dfs/data/data2/current/BP-1420643299-172.17.0.2-1734035773582 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-12T20:47:47,265 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-12T20:47:47,274 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4addcb12{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-12T20:47:47,274 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@47697099{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-12T20:47:47,274 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-12T20:47:47,275 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@34c62ed9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-12T20:47:47,275 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1caa172f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/9804dbef-ad1f-a1f3-1fd1-a759bfd08a30/hadoop.log.dir/,STOPPED} 2024-12-12T20:47:47,293 INFO 
[Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-12T20:47:47,606 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
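
The final records come from MiniZooKeeperCluster and HBaseTestingUtility tearing the test cluster down. For orientation, a minimal sketch of the test-side lifecycle that ends with a matching "Minicluster is down" record follows; the class and test body are assumptions, and only HBaseTestingUtility and its mini-cluster start/stop calls are taken as given.

import org.apache.hadoop.hbase.HBaseTestingUtility;

// Assumed test skeleton, not the actual flaky test from this run:
// startMiniCluster() brings up mini ZK, mini DFS and an HBase master/regionserver;
// shutdownMiniCluster() tears them down and finishes with the "Minicluster is down" log.
public class MiniClusterLifecycleSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();
    try {
      // ... test logic against util.getConnection() would go here ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
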